Mirror of https://github.com/rclone/rclone.git, synced 2026-01-25 05:43:21 +00:00

Compare commits: fix-freebs...fix-3740-m (1 commit, SHA1 c48424c0eb)

.github/workflows/build.yml (vendored, 4 changes)
@@ -102,7 +102,7 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v1
+        uses: actions/checkout@master
         with:
           path: ./src/github.com/${{ github.repository }}

@@ -211,7 +211,7 @@ jobs:
     steps:

      - name: Checkout
-        uses: actions/checkout@v1
+        uses: actions/checkout@master
         with:
           path: ./src/github.com/${{ github.repository }}

@@ -326,17 +326,6 @@ Photos folder" option in your google drive settings. You can then copy
 or move the photos locally and use the date the image was taken
 (created) set as the modification date.`,
     Advanced: true,
 }, {
-    Name:    "use_shared_date",
-    Default: false,
-    Help: `Use date file was shared instead of modified date.
-
-Note that, as with "--drive-use-created-date", this flag may have
-unexpected consequences when uploading/downloading files.
-
-If both this flag and "--drive-use-created-date" are set, the created
-date is used.`,
-    Advanced: true,
-}, {
     Name:    "list_chunk",
     Default: 1000,
@@ -474,7 +463,6 @@ type Options struct {
     ImportExtensions      string `config:"import_formats"`
     AllowImportNameChange bool   `config:"allow_import_name_change"`
     UseCreatedDate        bool   `config:"use_created_date"`
-    UseSharedDate         bool   `config:"use_shared_date"`
     ListChunk             int64  `config:"list_chunk"`
     Impersonate           string `config:"impersonate"`
     AlternateExport       bool   `config:"alternate_export"`
@@ -706,9 +694,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
     if f.opt.AuthOwnerOnly {
         fields += ",owners"
     }
-    if f.opt.UseSharedDate {
-        fields += ",sharedWithMeTime"
-    }
     if f.opt.SkipChecksumGphotos {
         fields += ",spaces"
     }
@@ -1110,8 +1095,6 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
     modifiedDate := info.ModifiedTime
     if f.opt.UseCreatedDate {
         modifiedDate = info.CreatedTime
-    } else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
-        modifiedDate = info.SharedWithMeTime
     }
     size := info.Size
     if f.opt.SizeAsQuota {
@@ -1480,14 +1463,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
     if iErr != nil {
         return nil, iErr
     }
-    // If listing the root of a teamdrive and got no entries,
-    // double check we have access
-    if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
-        err = f.teamDriveOK(ctx)
-        if err != nil {
-            return nil, err
-        }
-    }
     return entries, nil
 }

@@ -1625,7 +1600,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
     out := make(chan error, fs.Config.Checkers)
     list := walk.NewListRHelper(callback)
     overflow := []listREntry{}
-    listed := 0

     cb := func(entry fs.DirEntry) error {
         mu.Lock()
@@ -1638,7 +1612,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
                 overflow = append(overflow, listREntry{d.ID(), d.Remote()})
             }
         }
-        listed++
         return list.Add(entry)
     }

@@ -1695,21 +1668,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
         return err
     }

-    err = list.Flush()
-    if err != nil {
-        return err
-    }
-
-    // If listing the root of a teamdrive and got no entries,
-    // double check we have access
-    if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
-        err = f.teamDriveOK(ctx)
-        if err != nil {
-            return err
-        }
-    }
-
-    return nil
+    return list.Flush()
 }

 // itemToDirEntry converts a drive.File to a fs.DirEntry.
@@ -2082,30 +2041,9 @@ func (f *Fs) CleanUp(ctx context.Context) error {
     return nil
 }

-// teamDriveOK checks to see if we can access the team drive
-func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
-    if !f.isTeamDrive {
-        return nil
-    }
-    var td *drive.Drive
-    err = f.pacer.Call(func() (bool, error) {
-        td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
-        return shouldRetry(err)
-    })
-    if err != nil {
-        return errors.Wrap(err, "failed to get Team/Shared Drive info")
-    }
-    fs.Debugf(f, "read info from team drive %q", td.Name)
-    return err
-}
-
 // About gets quota information
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
     if f.isTeamDrive {
-        err := f.teamDriveOK(ctx)
-        if err != nil {
-            return nil, err
-        }
         // Teamdrives don't appear to have a usage API so just return empty
         return &fs.Usage{}, nil
     }
@@ -46,26 +46,13 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
 // APIString returns Time string in Jottacloud API format
 func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }

-// LoginToken is struct representing the login token generated in the WebUI
-type LoginToken struct {
-    Username      string `json:"username"`
-    Realm         string `json:"realm"`
-    WellKnownLink string `json:"well_known_link"`
-    AuthToken     string `json:"auth_token"`
-}
-
 // TokenJSON is the struct representing the HTTP response from OAuth2
 // providers returning a token in JSON form.
 type TokenJSON struct {
-    AccessToken      string `json:"access_token"`
-    ExpiresIn        int32  `json:"expires_in"` // at least PayPal returns string, while most return number
-    RefreshExpiresIn int32  `json:"refresh_expires_in"`
-    RefreshToken     string `json:"refresh_token"`
-    TokenType        string `json:"token_type"`
-    IDToken          string `json:"id_token"`
-    NotBeforePolicy  int32  `json:"not-before-policy"`
-    SessionState     string `json:"session_state"`
-    Scope            string `json:"scope"`
+    AccessToken  string `json:"access_token"`
+    TokenType    string `json:"token_type"`
+    RefreshToken string `json:"refresh_token"`
+    ExpiresIn    int32  `json:"expires_in"` // at least PayPal returns string, while most return number
 }

 // JSON structures returned by new API

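For reference, the post-change TokenJSON struct corresponds to a response body like the one in this standalone sketch. The values are made up, not an actual Jottacloud response:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // TokenJSON mirrors the struct on the added side of the hunk above.
    type TokenJSON struct {
        AccessToken  string `json:"access_token"`
        TokenType    string `json:"token_type"`
        RefreshToken string `json:"refresh_token"`
        ExpiresIn    int32  `json:"expires_in"`
    }

    func main() {
        // Illustrative token endpoint response.
        data := []byte(`{"access_token":"at","token_type":"bearer","refresh_token":"rt","expires_in":3600}`)
        var t TokenJSON
        if err := json.Unmarshal(data, &t); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", t)
    }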
@@ -4,13 +4,12 @@ import (
     "bytes"
     "context"
     "crypto/md5"
-    "encoding/base64"
     "encoding/hex"
-    "encoding/json"
     "fmt"
     "io"
     "io/ioutil"
     "log"
+    "math/rand"
     "net/http"
     "net/url"
     "os"
@@ -26,6 +25,7 @@ import (
     "github.com/rclone/rclone/fs/config"
     "github.com/rclone/rclone/fs/config/configmap"
     "github.com/rclone/rclone/fs/config/configstruct"
+    "github.com/rclone/rclone/fs/config/obscure"
     "github.com/rclone/rclone/fs/encodings"
     "github.com/rclone/rclone/fs/fserrors"
     "github.com/rclone/rclone/fs/fshttp"
@@ -41,25 +41,29 @@ const enc = encodings.JottaCloud

 // Globals
 const (
-    minSleep          = 10 * time.Millisecond
-    maxSleep          = 2 * time.Second
-    decayConstant     = 2 // bigger for slower decay, exponential
-    defaultDevice     = "Jotta"
-    defaultMountpoint = "Archive"
-    rootURL           = "https://www.jottacloud.com/jfs/"
-    apiURL            = "https://api.jottacloud.com/"
-    baseURL           = "https://www.jottacloud.com/"
-    tokenURL          = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
-    cachePrefix       = "rclone-jcmd5-"
-    configDevice      = "device"
-    configMountpoint  = "mountpoint"
-    configVersion     = 1
+    minSleep                    = 10 * time.Millisecond
+    maxSleep                    = 2 * time.Second
+    decayConstant               = 2 // bigger for slower decay, exponential
+    defaultDevice               = "Jotta"
+    defaultMountpoint           = "Archive"
+    rootURL                     = "https://www.jottacloud.com/jfs/"
+    apiURL                      = "https://api.jottacloud.com/"
+    baseURL                     = "https://www.jottacloud.com/"
+    tokenURL                    = "https://api.jottacloud.com/auth/v1/token"
+    registerURL                 = "https://api.jottacloud.com/auth/v1/register"
+    cachePrefix                 = "rclone-jcmd5-"
+    rcloneClientID              = "nibfk8biu12ju7hpqomr8b1e40"
+    rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
+    configClientID              = "client_id"
+    configClientSecret          = "client_secret"
+    configDevice                = "device"
+    configMountpoint            = "mountpoint"
+    charset                     = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
 )

 var (
     // Description of how to auth for this app for a personal account
     oauthConfig = &oauth2.Config{
-        ClientID: "jottacli",
         Endpoint: oauth2.Endpoint{
             AuthURL:  tokenURL,
             TokenURL: tokenURL,
@@ -77,39 +81,43 @@ func init() {
         NewFs: NewFs,
         Config: func(name string, m configmap.Mapper) {
             ctx := context.TODO()
-            tokenString, ok := m.Get("token")
-            if ok && tokenString != "" {
-                fmt.Printf("Already have a token - refresh?\n")
-                if !config.Confirm(false) {
-                    return
-                }
-            }
-
-            refresh := false
-            if version, ok := m.Get("configVersion"); ok {
-                ver, err := strconv.Atoi(version)
+            srv := rest.NewClient(fshttp.NewClient(fs.Config))
+            fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
+            if config.Confirm(false) {
+                deviceRegistration, err := registerDevice(ctx, srv)
                 if err != nil {
-                    log.Fatalf("Failed to parse config version - corrupted config")
+                    log.Fatalf("Failed to register device: %v", err)
                 }
-                refresh = ver != configVersion
-            } else {
-                refresh = true
+
+                m.Set(configClientID, deviceRegistration.ClientID)
+                m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
+                fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
             }

-            if refresh {
-                fmt.Printf("Config outdated - refreshing\n")
-            } else {
+            tokenString, ok := m.Get("token")
+            if ok && tokenString != "" {
+                fmt.Printf("Already have a token - refresh?\n")
+                if !config.Confirm(false) {
+                    return
+                }
+            }
+            clientID, ok := m.Get(configClientID)
+            if !ok {
+                clientID = rcloneClientID
+            }
+            clientSecret, ok := m.Get(configClientSecret)
+            if !ok {
+                clientSecret = rcloneEncryptedClientSecret
+            }
+            oauthConfig.ClientID = clientID
+            oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

-            clientConfig := *fs.Config
-            clientConfig.UserAgent = "JottaCli 0.6.18626 windows-amd64"
-            srv := rest.NewClient(fshttp.NewClient(&clientConfig))
+            fmt.Printf("Username> ")
+            username := config.ReadLine()
+            password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")

-            fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
-            fmt.Printf("Login Token> ")
-            loginToken := config.ReadLine()
-
-            token, err := doAuth(ctx, srv, loginToken)
+            token, err := doAuth(ctx, srv, username, password)
             if err != nil {
                 log.Fatalf("Failed to get oauth token: %s", err)
             }
@@ -135,8 +143,6 @@ func init() {
                 m.Set(configDevice, device)
                 m.Set(configMountpoint, mountpoint)
             }
-
-            m.Set("configVersion", strconv.Itoa(configVersion))
         },
         Options: []fs.Option{{
             Name: "md5_memory_limit",
@@ -243,51 +249,67 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
     return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }

-// doAuth runs the actual token request
-func doAuth(ctx context.Context, srv *rest.Client, loginTokenBase64 string) (token oauth2.Token, err error) {
-    loginTokenBytes, err := base64.StdEncoding.DecodeString(loginTokenBase64)
-    if err != nil {
-        return token, err
-    }
+// registerDevice register a new device for use with the jottacloud API
+func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
+    // random generator to generate random device names
+    seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
+    randonDeviceNamePartLength := 21
+    randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
+    for i := range randomDeviceNamePart {
+        randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
+    }
+    randomDeviceName := "rclone-" + string(randomDeviceNamePart)
+    fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)

-    var loginToken api.LoginToken
-    decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
-    err = decoder.Decode(&loginToken)
-    if err != nil {
-        return token, err
-    }
+    values := url.Values{}
+    values.Set("device_id", randomDeviceName)

-    // we don't seem to need any data from this link but the API is not happy if skip it
-    opts := rest.Opts{
-        Method:     "GET",
-        RootURL:    loginToken.WellKnownLink,
-        NoResponse: true,
-    }
-    _, err = srv.Call(ctx, &opts)
-    if err != nil {
-        return token, err
-    }
+    opts := rest.Opts{
+        Method:       "POST",
+        RootURL:      registerURL,
+        ContentType:  "application/x-www-form-urlencoded",
+        ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
+        Parameters:   values,
+    }
+
+    var deviceRegistration *api.DeviceRegistrationResponse
+    _, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration)
+    return deviceRegistration, err
+}
+
+// doAuth runs the actual token request
+func doAuth(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
+    // prepare out token request with username and password
     values := url.Values{}
-    values.Set("client_id", "jottacli")
-    values.Set("grant_type", "password")
-    values.Set("password", loginToken.AuthToken)
-    values.Set("scope", "offline_access+openid")
-    values.Set("username", loginToken.Username)
-    values.Encode()
-    opts = rest.Opts{
+    values.Set("grant_type", "PASSWORD")
+    values.Set("password", password)
+    values.Set("username", username)
+    values.Set("client_id", oauthConfig.ClientID)
+    values.Set("client_secret", oauthConfig.ClientSecret)
+    opts := rest.Opts{
         Method:      "POST",
         RootURL:     oauthConfig.Endpoint.AuthURL,
         ContentType: "application/x-www-form-urlencoded",
-        Body:        strings.NewReader(values.Encode()),
+        Parameters:  values,
     }

     // do the first request
     var jsonToken api.TokenJSON
-    _, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
+    resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
     if err != nil {
-        return token, err
+        // if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
+        if resp != nil {
+            if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
+                fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
+                fmt.Printf("Enter verification code> ")
+                authCode := config.ReadLine()
+
+                authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
+                opts.ExtraHeaders = make(map[string]string)
+                opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
+                resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
+            }
+        }
+    }

     token.AccessToken = jsonToken.AccessToken
@@ -449,6 +471,29 @@ func (f *Fs) filePath(file string) string {
     return urlPathEscape(f.filePathRaw(file))
 }

+// Jottacloud requires the grant_type 'refresh_token' string
+// to be uppercase and throws a 400 Bad Request if we use the
+// lower case used by the oauth2 module
+//
+// This filter catches all refresh requests, reads the body,
+// changes the case and then sends it on
+func grantTypeFilter(req *http.Request) {
+    if tokenURL == req.URL.String() {
+        // read the entire body
+        refreshBody, err := ioutil.ReadAll(req.Body)
+        if err != nil {
+            return
+        }
+        _ = req.Body.Close()
+
+        // make the refresh token upper case
+        refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
+
+        // set the new ReadCloser (with a dummy Close())
+        req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
+    }
+}
+
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     ctx := context.TODO()
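As a quick illustration of the rewrite grantTypeFilter performs, here is a standalone sketch with a made-up request body; only the strings.Replace call mirrors the code above:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // A form-encoded refresh request body as the oauth2 module would send it
        // (illustrative values, not a real token).
        body := "grant_type=refresh_token&refresh_token=abc123"
        // The same replacement the filter applies before resending the request.
        fixed := strings.Replace(body, "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1)
        fmt.Println(fixed) // grant_type=REFRESH_TOKEN&refresh_token=abc123
    }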
@@ -459,23 +504,30 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
         return nil, err
     }

-    var ok bool
-    var version string
-    if version, ok = m.Get("configVersion"); ok {
-        ver, err := strconv.Atoi(version)
-        if err != nil {
-            return nil, errors.New("Failed to parse config version")
-        }
-        ok = ver == configVersion
-    }
-    if !ok {
-        return nil, errors.New("Outdated config - please reconfigure this backend")
-    }
-
     rootIsDir := strings.HasSuffix(root, "/")
     root = parsePath(root)

+    clientID, ok := m.Get(configClientID)
+    if !ok {
+        clientID = rcloneClientID
+    }
+    clientSecret, ok := m.Get(configClientSecret)
+    if !ok {
+        clientSecret = rcloneEncryptedClientSecret
+    }
+    oauthConfig.ClientID = clientID
+    oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
+
+    // the oauth client for the api servers needs
+    // a filter to fix the grant_type issues (see above)
     baseClient := fshttp.NewClient(fs.Config)
+    if do, ok := baseClient.Transport.(interface {
+        SetRequestFilter(f func(req *http.Request))
+    }); ok {
+        do.SetRequestFilter(grantTypeFilter)
+    } else {
+        fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
+    }
     oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
     if err != nil {
         return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
@@ -820,10 +820,10 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
         return 0, errors.Wrap(err, "can't read status of source file while transferring")
     }
     if file.o.size != fi.Size() {
-        return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size()))
+        return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
     }
     if !file.o.modTime.Equal(fi.ModTime()) {
-        return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime()))
+        return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
     }
 }

@@ -715,16 +715,6 @@ file you can stream upload is 48GB. If you wish to stream upload
 larger files then you will need to increase chunk_size.`,
     Default:  minChunkSize,
     Advanced: true,
 }, {
-    Name: "copy_cutoff",
-    Help: `Cutoff for switching to multipart copy
-
-Any files larger than this that need to be server side copied will be
-copied in chunks of this size.
-
-The minimum is 0 and the maximum is 5GB.`,
-    Default:  fs.SizeSuffix(maxSizeForCopy),
-    Advanced: true,
-}, {
     Name: "disable_checksum",
     Help: "Don't store MD5 checksum with object metadata",
@@ -819,7 +809,6 @@ type Options struct {
     SSEKMSKeyID     string        `config:"sse_kms_key_id"`
     StorageClass    string        `config:"storage_class"`
     UploadCutoff    fs.SizeSuffix `config:"upload_cutoff"`
-    CopyCutoff      fs.SizeSuffix `config:"copy_cutoff"`
     ChunkSize       fs.SizeSuffix `config:"chunk_size"`
     DisableChecksum bool          `config:"disable_checksum"`
     SessionToken    string        `config:"session_token"`
@@ -1664,7 +1653,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
         req.StorageClass = &f.opt.StorageClass
     }

-    if srcSize >= int64(f.opt.CopyCutoff) {
+    if srcSize >= int64(f.opt.UploadCutoff) {
         return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
     }
     return f.pacer.Call(func() (bool, error) {
@@ -1677,8 +1666,8 @@ func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
     start := partIndex * partSize
     var ends string
     if partIndex == numParts-1 {
-        if totalSize >= 1 {
-            ends = strconv.FormatInt(totalSize-1, 10)
+        if totalSize >= 0 {
+            ends = strconv.FormatInt(totalSize, 10)
         }
     } else {
         ends = strconv.FormatInt(start+partSize-1, 10)
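To make the range arithmetic concrete, here is a standalone sketch of the removed variant. HTTP byte ranges are inclusive, so the last part ends at totalSize-1; the added variant differs only in ending the last part at totalSize instead. The helper name and values are illustrative:

    package main

    import "fmt"

    // lastPartEnd mirrors the removed branch above: the final part of a
    // multipart copy ends at totalSize-1 because byte ranges are inclusive.
    func lastPartEnd(totalSize int64) int64 { return totalSize - 1 }

    func main() {
        // An 8 byte object copied in 5 byte parts gives two ranges:
        //   part 0: bytes=0-4
        //   part 1: bytes=5-7 (end is 8-1 with the removed code, 8 with the added code)
        partSize, totalSize := int64(5), int64(8)
        fmt.Printf("bytes=%d-%d\n", 0*partSize, partSize-1)
        fmt.Printf("bytes=%d-%d\n", 1*partSize, lastPartEnd(totalSize))
    }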
@@ -1715,7 +1704,7 @@ func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBuck
         }
     }()

-    partSize := int64(f.opt.CopyCutoff)
+    partSize := int64(f.opt.ChunkSize)
     numParts := (srcSize-1)/partSize + 1

     var parts []*s3.CompletedPart
@@ -1943,6 +1932,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
     }
     o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))

+    if o.bytes >= maxSizeForCopy {
+        fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
+        return nil
+    }
+
     // Can't update metadata here, so return this error to force a recopy
     if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
         return fs.ErrorCantSetModTime
@@ -2046,7 +2040,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     // read the md5sum if available for non multpart and if
     // disable checksum isn't present.
     var md5sum string
-    if !multipart && !o.fs.opt.DisableChecksum {
+    if !multipart || !o.fs.opt.DisableChecksum {
         hash, err := src.Hash(ctx, hash.MD5)
         if err == nil && matchMd5.MatchString(hash) {
             hashBytes, err := hex.DecodeString(hash)

@@ -156,11 +156,6 @@ Home directory can be found in a shared folder called "home"
     Default:  "",
     Help:     "The command used to read sha1 hashes. Leave blank for autodetect.",
     Advanced: true,
-}, {
-    Name:     "skip_links",
-    Default:  false,
-    Help:     "Set to skip any symlinks and any other non regular files.",
-    Advanced: true,
 }},
     }
     fs.Register(fsi)
@@ -182,7 +177,6 @@ type Options struct {
     SetModTime     bool   `config:"set_modtime"`
     Md5sumCommand  string `config:"md5sum_command"`
    Sha1sumCommand string `config:"sha1sum_command"`
-    SkipLinks      bool   `config:"skip_links"`
 }

 // Fs stores the interface to the remote SFTP files
@@ -606,16 +600,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
         remote := path.Join(dir, info.Name())
         // If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to
         // pick up the size and type of the destination, instead of the size and type of the symlink.
-        if !info.Mode().IsRegular() && !info.IsDir() {
-            if f.opt.SkipLinks {
-                // skip non regular file if SkipLinks is set
-                continue
-            }
+        if !info.Mode().IsRegular() {
             oldInfo := info
             info, err = f.stat(remote)
             if err != nil {
                 if !os.IsNotExist(err) {
-                    fs.Errorf(remote, "stat of non-regular file failed: %v", err)
+                    fs.Errorf(remote, "stat of non-regular file/dir failed: %v", err)
                 }
                 info = oldInfo
             }

@@ -7,7 +7,6 @@ import (
     "context"
     "fmt"
     "io"
-    "net/url"
     "path"
     "strconv"
     "strings"
@@ -953,8 +952,8 @@ func (o *Object) isStaticLargeObject() (bool, error) {
     return o.hasHeader("X-Static-Large-Object")
 }

-func (o *Object) isInContainerVersioning(container string) (bool, error) {
-    _, headers, err := o.fs.c.Container(container)
+func (o *Object) isInContainerVersioning() (bool, error) {
+    _, headers, err := o.fs.c.Container(o.fs.root)
     if err != nil {
         return false, err
     }
@@ -1131,10 +1130,6 @@ func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err
         return
     }
     dirManifest := o.headers["X-Object-Manifest"]
-    dirManifest, err = url.PathUnescape(dirManifest)
-    if err != nil {
-        return
-    }
     delimiter := strings.Index(dirManifest, "/")
     if len(dirManifest) == 0 || delimiter < 0 {
         err = errors.New("Missing or wrong structure of manifest of Dynamic large object")
@@ -1346,7 +1341,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
     }
     // ...then segments if required
     if isDynamicLargeObject {
-        isInContainerVersioning, err := o.isInContainerVersioning(container)
+        isInContainerVersioning, err := o.isInContainerVersioning()
         if err != nil {
             return err
         }

@@ -113,8 +113,7 @@ type Fs struct {
     canStream          bool // set if can stream
     useOCMtime         bool // set if can use X-OC-Mtime
     retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
-    hasMD5             bool // set if can use owncloud style checksums for MD5
-    hasSHA1            bool // set if can use owncloud style checksums for SHA1
+    hasChecksums       bool // set if can use owncloud style checksums
 }

 // Object describes a webdav object
@@ -216,7 +215,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
         },
         NoRedirect: true,
     }
-    if f.hasMD5 || f.hasSHA1 {
+    if f.hasChecksums {
         opts.Body = bytes.NewBuffer(owncloudProps)
     }
     var result api.Multistatus
@@ -384,7 +383,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 // sets the BearerToken up
 func (f *Fs) setBearerToken(token string) {
     f.opt.BearerToken = token
-    f.srv.SetHeader("Authorization", "Bearer "+token)
+    f.srv.SetHeader("Authorization", "BEARER "+token)
 }

 // fetch the bearer token using the command
@@ -431,12 +430,11 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
         f.canStream = true
         f.precision = time.Second
         f.useOCMtime = true
-        f.hasMD5 = true
-        f.hasSHA1 = true
+        f.hasChecksums = true
     case "nextcloud":
         f.precision = time.Second
         f.useOCMtime = true
-        f.hasSHA1 = true
+        f.hasChecksums = true
     case "sharepoint":
         // To mount sharepoint, two Cookies are required
         // They have to be set instead of BasicAuth
@@ -538,7 +536,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
             "Depth": depth,
         },
     }
-    if f.hasMD5 || f.hasSHA1 {
+    if f.hasChecksums {
         opts.Body = bytes.NewBuffer(owncloudProps)
     }
     var result api.Multistatus
@@ -947,14 +945,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-    hashes := hash.Set(hash.None)
-    if f.hasMD5 {
-        hashes.Add(hash.MD5)
+    if f.hasChecksums {
+        return hash.NewHashSet(hash.MD5, hash.SHA1)
     }
-    if f.hasSHA1 {
-        hashes.Add(hash.SHA1)
-    }
-    return hashes
+    return hash.Set(hash.None)
 }

 // About gets quota information
@@ -1021,11 +1015,13 @@ func (o *Object) Remote() string {

 // Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
 func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
-    if t == hash.MD5 && o.fs.hasMD5 {
-        return o.md5, nil
-    }
-    if t == hash.SHA1 && o.fs.hasSHA1 {
-        return o.sha1, nil
+    if o.fs.hasChecksums {
+        switch t {
+        case hash.SHA1:
+            return o.sha1, nil
+        case hash.MD5:
+            return o.md5, nil
+        }
     }
     return "", hash.ErrUnsupported
 }
@@ -1046,14 +1042,10 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
     o.hasMetaData = true
     o.size = info.Size
     o.modTime = time.Time(info.Modified)
-    if o.fs.hasMD5 || o.fs.hasSHA1 {
+    if o.fs.hasChecksums {
         hashes := info.Hashes()
-        if o.fs.hasSHA1 {
-            o.sha1 = hashes[hash.SHA1]
-        }
-        if o.fs.hasMD5 {
-            o.md5 = hashes[hash.MD5]
-        }
+        o.sha1 = hashes[hash.SHA1]
+        o.md5 = hashes[hash.MD5]
     }
     return nil
 }
@@ -1134,21 +1126,19 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
         ContentType:   fs.MimeType(ctx, src),
     }
-    if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
+    if o.fs.useOCMtime || o.fs.hasChecksums {
         opts.ExtraHeaders = map[string]string{}
         if o.fs.useOCMtime {
             opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
         }
-        // Set one upload checksum
-        // Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
-        // Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
-        if o.fs.hasSHA1 {
+        if o.fs.hasChecksums {
+            // Set an upload checksum - prefer SHA1
+            //
+            // This is used as an upload integrity test. If we set
+            // only SHA1 here, owncloud will calculate the MD5 too.
             if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
                 opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
-            }
-        }
-        if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
-            if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
+            } else if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
                 opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
             }
         }

@@ -46,11 +46,10 @@ __rclone_custom_func() {
     else
         __rclone_init_completion -n : || return
     fi
-    local rclone=(command rclone --ask-password=false)
     if [[ $cur != *:* ]]; then
         local ifs=$IFS
         IFS=$'\n'
-        local remotes=($("${rclone[@]}" listremotes 2> /dev/null))
+        local remotes=($(command rclone listremotes))
         IFS=$ifs
         local remote
         for remote in "${remotes[@]}"; do

@@ -69,7 +68,7 @@ __rclone_custom_func() {
             fi
             local ifs=$IFS
             IFS=$'\n'
-            local lines=($("${rclone[@]}" lsf "${cur%%:*}:$prefix" 2> /dev/null))
+            local lines=($(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null))
             IFS=$ifs
             local line
             for line in "${lines[@]}"; do

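For context, this completion function is part of rclone's generated bash completion. Assuming the standard subcommand of that era, it can be regenerated and installed with something like:

    rclone genautocomplete bash /etc/bash_completion.d/rclone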
@@ -263,7 +263,7 @@ Contributors
   * garry415 <garry.415@gmail.com>
   * forgems <forgems@gmail.com>
   * Florian Apolloner <florian@apolloner.eu>
-  * Aleksandar Janković <office@ajankovic.com> <ajankovic@users.noreply.github.com>
+  * Aleksandar Jankovic <office@ajankovic.com>
   * Maran <maran@protonmail.com>
   * nguyenhuuluan434 <nguyenhuuluan434@gmail.com>
   * Laura Hausmann <zotan@zotan.pw> <laura@hausmann.dev>

@@ -313,6 +313,3 @@ Contributors
   * Marco Molteni <marco.molteni@mailbox.org>
   * Ankur Gupta <ankur0493@gmail.com>
   * Maciej Zimnoch <maciej@scylladb.com>
-  * anuar45 <serdaliyev.anuar@gmail.com>
-  * Fernando <ferferga@users.noreply.github.com>
-  * David Cole <david.cole@sohonet.com>

@@ -65,28 +65,6 @@ infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Archit
 which creates drives accessible for everyone on the system or
 alternatively using [the nssm service manager](https://nssm.cc/usage).

-#### Mount as a network drive
-
-By default, rclone will mount the remote as a normal drive. However, you can also mount it as a **Network Drive**
-(or **Network Share**, as mentioned in some places)
-
-Unlike other systems, Windows provides a different filesystem type for network drives.
-Windows and other programs treat the network drives and fixed/removable drives differently:
-In network drives, many I/O operations are optimized, as the high latency and low reliability
-(compared to a normal drive) of a network is expected.
-
-Although many people prefer network shares to be mounted as normal system drives, this might cause
-some issues, such as programs not working as expected or freezes and errors while operating with the
-mounted remote in Windows Explorer. If you experience any of those, consider mounting rclone remotes as network shares,
-as Windows expects normal drives to be fast and reliable, while cloud storage is far from that.
-See also [Limitations](#limitations) section below for more info
-
-Add `--fuse-flag --VolumePrefix=\server\share` to your `mount` command, **replacing `share` with any other
-name of your choice if you are mounting more than one remote**. Otherwise, the mountpoints will conflict and
-your mounted filesystems will overlap.
-
-[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
-
 ### Limitations

 Without the use of "--vfs-cache-mode" this can only write files
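For illustration, a hypothetical invocation of the network-drive flag described in the removed section above would look like this (the remote name and drive letter are made up):

    rclone mount remote: X: --fuse-flag --VolumePrefix=\server\share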
@@ -99,7 +99,7 @@ Or instead of htpassword if you just want a single user and password:

 The GUI is being developed in the: [rclone/rclone-webui-react respository](https://github.com/rclone/rclone-webui-react).

-Bug reports and contributions are very welcome :-)
+Bug reports and contributions very welcome welcome :-)

 If you have questions then please ask them on the [rclone forum](https://forum.rclone.org/).

@@ -56,14 +56,7 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.

     rclone config

 ## macOS installation with brew ##

     brew install rclone

-## macOS installation from precompiled binary, using curl ##
-
-To avoid problems with macOS gatekeeper enforcing the binary to be signed and
-notarized it is enough to download with `curl`.
+## macOS installation from precompiled binary ##

 Download the latest version of rclone.

@@ -88,19 +81,6 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.

     rclone config

-## macOS installation from precompiled binary, using a web browser ##
-
-When downloading a binary with a web browser, the browser will set the macOS
-gatekeeper quarantine attribute. Starting from Catalina, when attempting to run
-`rclone`, a pop-up will appear saying:
-
-    “rclone” cannot be opened because the developer cannot be verified.
-    macOS cannot verify that this app is free from malware.
-
-The simplest fix is to run
-
-    xattr -d com.apple.quarantine rclone
-
 ## Install with docker ##

 The rclone maintains a [docker image for rclone](https://hub.docker.com/r/rclone/rclone).

@@ -11,7 +11,7 @@ Paths are specified as `remote:path`

 Paths may be as deep as required, eg `remote:directory/subdirectory`.

-To configure Jottacloud you will need to generate a personal security token in the Jottacloud web inteface. You will the option to do in your [account security settings](https://www.jottacloud.com/web/secure). Note that the web inteface may refer to this token as a JottaCli token.
+To configure Jottacloud you will need to enter your username and password and select a mountpoint.

 Here is an example of how to make a remote called `remote`. First run:

@@ -42,8 +42,16 @@ n) No
 y/n> n
 Remote config

-Generate a personal login token here: https://www.jottacloud.com/web/secure
-Login Token> <your token here>
+Do you want to create a machine specific API key?
+
+Rclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.
+
+y) Yes
+n) No
+y/n> y
+Username> 0xC4KE@gmail.com
+Your Jottacloud password is only required during setup and will not be stored.
+password:

 Do you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?

@@ -66,10 +74,11 @@ Mountpoints> 1
 [jotta]
 type = jottacloud
 user = 0xC4KE@gmail.com
+client_id = .....
+client_secret = ........
 token = {........}
 device = Jotta
 mountpoint = Archive
-configVersion = 1
 --------------------
 y) Yes this is OK
 e) Edit this remote

@@ -93,7 +102,7 @@ To copy a local directory to an Jottacloud directory called backup

 ### Devices and Mountpoints ###

 The official Jottacloud client registers a device for each computer you install it on and then creates a mountpoint for each folder you select for Backup.
-The web interface uses a special device called Jotta for the Archive, Sync and Shared mountpoints. In most cases you'll want to use the Jotta/Archive device/mounpoint however if you want to access files uploaded by any of the official clients rclone provides the option to select other devices and mountpoints during config.
+The web interface uses a special device called Jotta for the Archive, Sync and Shared mountpoints. In most cases you'll want to use the Jotta/Archive device/mounpoint however if you want to access files uploaded by the official rclone provides the option to select other devices and mountpoints during config.

 ### --fast-list ###

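The `@@ -93` hunk header above sits just below the doc's copy example. For reference, copying a local directory to a Jottacloud directory called backup would look like this (the local path is illustrative):

    rclone copy /home/source remote:backup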
@@ -292,7 +292,7 @@ func (s *StatsInfo) String() string {
         }
     }

-    _, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s\n",
+    _, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
         dateString,
         fs.SizeSuffix(s.bytes),
         fs.SizeSuffix(totalSize).Unit("Bytes"),

@@ -313,23 +313,16 @@ func (s *StatsInfo) String() string {
         errorDetails = " (no need to retry)"
     }

-    // Add only non zero stats
-    if s.errors != 0 {
-        _, _ = fmt.Fprintf(buf, "Errors:       %10d%s\n",
-            s.errors, errorDetails)
-    }
-    if s.checks != 0 || totalChecks != 0 {
-        _, _ = fmt.Fprintf(buf, "Checks:       %10d / %d, %s\n",
-            s.errors, totalChecks, percent(s.checks, totalChecks))
-    }
-    if s.deletes != 0 {
-        _, _ = fmt.Fprintf(buf, "Deleted:      %10d\n", s.deletes)
-    }
-    if s.transfers != 0 || totalTransfer != 0 {
-        _, _ = fmt.Fprintf(buf, "Transferred:  %10d / %d, %s\n",
-            s.transfers, totalTransfer, percent(s.transfers, totalTransfer))
-    }
-    _, _ = fmt.Fprintf(buf, "Elapsed time: %10v\n", dtRounded)
+    _, _ = fmt.Fprintf(buf, `
+Errors:       %10d%s
+Checks:       %10d / %d, %s
+Transferred:  %10d / %d, %s
+Elapsed time: %10v
+`,
+        s.errors, errorDetails,
+        s.checks, totalChecks, percent(s.checks, totalChecks),
+        s.transfers, totalTransfer, percent(s.transfers, totalTransfer),
+        dtRounded)
 }

 // checking and transferring have their own locking so unlock
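Given the format string on the added side, the rendered stats footer looks roughly like this (all numbers are made up):

    Errors:                1 (no need to retry)
    Checks:                3 / 3, 100%
    Transferred:           2 / 2, 100%
    Elapsed time:       1.5s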
@@ -178,53 +178,6 @@ func IsNoRetryError(err error) (isNoRetry bool) {
     return
 }

-// NoLowLevelRetrier is an optional interface for error as to whether
-// the operation should not be retried at a low level.
-//
-// NoLowLevelRetry errors won't be retried by low level retry loops.
-type NoLowLevelRetrier interface {
-    error
-    NoLowLevelRetry() bool
-}
-
-// wrappedNoLowLevelRetryError is an error wrapped so it will satisfy the
-// NoLowLevelRetrier interface and return true
-type wrappedNoLowLevelRetryError struct {
-    error
-}
-
-// NoLowLevelRetry interface
-func (err wrappedNoLowLevelRetryError) NoLowLevelRetry() bool {
-    return true
-}
-
-// Check interface
-var _ NoLowLevelRetrier = wrappedNoLowLevelRetryError{error(nil)}
-
-// NoLowLevelRetryError makes an error which indicates the sync
-// shouldn't be low level retried.
-func NoLowLevelRetryError(err error) error {
-    return wrappedNoLowLevelRetryError{err}
-}
-
-// Cause returns the underlying error
-func (err wrappedNoLowLevelRetryError) Cause() error {
-    return err.error
-}
-
-// IsNoLowLevelRetryError returns true if err conforms to the NoLowLevelRetry
-// interface and calling the NoLowLevelRetry method returns true.
-func IsNoLowLevelRetryError(err error) (isNoLowLevelRetry bool) {
-    errors.Walk(err, func(err error) bool {
-        if r, ok := err.(NoLowLevelRetrier); ok {
-            isNoLowLevelRetry = r.NoLowLevelRetry()
-            return true
-        }
-        return false
-    })
-    return
-}
-
 // RetryAfter is an optional interface for error as to whether the
 // operation should be retried after a given delay
 //

@@ -392,11 +345,6 @@ func ShouldRetry(err error) bool {
         return false
     }

-    // If error has been marked to NoLowLevelRetry then don't retry
-    if IsNoLowLevelRetryError(err) {
-        return false
-    }
-
     // Find root cause if available
     retriable, err := Cause(err)
     if retriable {

@@ -211,17 +211,11 @@ func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) ([][]fs.Directory, er
     if err != nil {
         return nil, errors.Wrap(err, "find duplicate dirs")
     }
-    // make sure parents are before children
-    duplicateNames := []string{}
-    for name, ds := range dirs {
-        if len(ds) > 1 {
-            duplicateNames = append(duplicateNames, name)
-        }
-    }
-    sort.Strings(duplicateNames)
     duplicateDirs := [][]fs.Directory{}
-    for _, name := range duplicateNames {
-        duplicateDirs = append(duplicateDirs, dirs[name])
+    for _, ds := range dirs {
+        if len(ds) > 1 {
+            duplicateDirs = append(duplicateDirs, ds)
+        }
     }
     return duplicateDirs, nil
 }

@@ -241,8 +235,7 @@ func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]fs
             fs.Infof(dirs[0], "Merging contents of duplicate directories")
             err := mergeDirs(ctx, dirs)
             if err != nil {
-                err = fs.CountError(err)
-                fs.Errorf(nil, "merge duplicate dirs: %v", err)
+                return errors.Wrap(err, "merge duplicate dirs")
             }
         } else {
             fs.Infof(dirs[0], "NOT Merging contents of duplicate directories as --dry-run")

@@ -258,16 +251,23 @@ func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {
     fs.Infof(f, "Looking for duplicates using %v mode.", mode)

-    // Find duplicate directories first and fix them
-    duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
-    if err != nil {
-        return err
-    }
-    if len(duplicateDirs) != 0 {
+    // Find duplicate directories first and fix them - repeat
+    // until all fixed
+    for {
+        duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
+        if err != nil {
+            return err
+        }
+        if len(duplicateDirs) == 0 {
+            break
+        }
         err = dedupeMergeDuplicateDirs(ctx, f, duplicateDirs)
         if err != nil {
             return err
         }
+        if fs.Config.DryRun {
+            break
+        }
+    }

     // find a hash to use

@@ -275,7 +275,7 @@ func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {

     // Now find duplicate files
     files := map[string][]fs.Object{}
-    err = walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
+    err := walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
         entries.ForObject(func(o fs.Object) {
             remote := o.Remote()
             files[remote] = append(files[remote], o)

@@ -721,9 +721,6 @@ func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) {
         atomic.AddInt32(&c.srcFilesMissing, 1)
     case fs.Directory:
         // Do the same thing to the entire contents of the directory
-        if c.oneway {
-            return false
-        }
         return true
     default:
         panic("Bad object in DirEntries")

@@ -7,7 +7,6 @@ import (

     "github.com/pkg/errors"
     "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/fs/fserrors"
 )

 // reOpen is a wrapper for an object reader which reopens the stream on error

@@ -105,7 +104,7 @@ func (h *reOpen) Read(p []byte) (n int, err error) {
         h.err = err
     }
     h.read += int64(n)
-    if err != nil && err != io.EOF && !fserrors.IsNoLowLevelRetryError(err) {
+    if err != nil && err != io.EOF {
         // close underlying stream
         h.opened = false
         _ = h.rc.Close()

vendor/bazil.org/fuse/fuse.go (generated, vendored, 2 changes)

@@ -1004,7 +1004,7 @@ loop:
     }

     case opBmap:
-        goto unrecognized
+        panic("opBmap")

     case opDestroy:
         req = &DestroyRequest{

vendor/github.com/t3rm1n4l/go-mega/mega.go (generated, vendored, 8 changes)

@@ -750,7 +750,10 @@ func (m *Mega) addFSNode(itm FSNode) (*Node, error) {
         }
     // Shared file
     default:
-        k := m.FS.skmap[args[0]]
+        k, ok := m.FS.skmap[args[0]]
+        if !ok {
+            return nil, errors.New("couldn't find decryption key for shared file")
+        }
         b, err := base64urldecode(k)
         if err != nil {
             return nil, err

@@ -921,7 +924,8 @@ func (m *Mega) getFileSystem() error {
     for _, itm := range res[0].F {
         _, err = m.addFSNode(itm)
         if err != nil {
-            return err
+            m.debugf("couldn't decode FSNode %#v: %v ", itm, err)
+            continue
         }
     }

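The first mega.go change relies on Go's comma-ok map lookup, which distinguishes a missing key from a stored zero value. A minimal standalone illustration (map contents are made up):

    package main

    import "fmt"

    func main() {
        skmap := map[string]string{"handle1": "key1"}
        // Plain indexing returns "" for a missing key, which is indistinguishable
        // from an empty stored key; the comma-ok form makes the miss explicit.
        if k, ok := skmap["handle2"]; !ok {
            fmt.Println("couldn't find decryption key")
        } else {
            fmt.Println("key:", k)
        }
    }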
@@ -37,19 +37,13 @@ type File struct {
 }

 // newFile creates a new File
-//
-// o may be nil
 func newFile(d *Dir, o fs.Object, leaf string) *File {
-    f := &File{
+    return &File{
         d:     d,
         o:     o,
         leaf:  leaf,
         inode: newInode(),
     }
-    if o != nil {
-        f.size = o.Size()
-    }
-    return f
 }

 // String converts it to printable

@@ -21,18 +21,16 @@ func cleanup(t *testing.T, r *fstest.Run, vfs *VFS) {
     r.Finalise()
 }

-// Create a file and open it with the flags passed in
-func rwHandleCreateFlags(t *testing.T, r *fstest.Run, create bool, filename string, flags int) (*VFS, *RWFileHandle) {
+// Open a file for write
+func rwHandleCreateReadOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
     opt := DefaultOpt
     opt.CacheMode = CacheModeFull
     vfs := New(r.Fremote, &opt)

-    if create {
-        file1 := r.WriteObject(context.Background(), filename, "0123456789abcdef", t1)
-        fstest.CheckItems(t, r.Fremote, file1)
-    }
+    file1 := r.WriteObject(context.Background(), "dir/file1", "0123456789abcdef", t1)
+    fstest.CheckItems(t, r.Fremote, file1)

-    h, err := vfs.OpenFile(filename, flags, 0777)
+    h, err := vfs.OpenFile("dir/file1", os.O_RDONLY, 0777)
     require.NoError(t, err)
     fh, ok := h.(*RWFileHandle)
     require.True(t, ok)

@@ -40,14 +38,18 @@ func rwHandleCreateFlags(t *testing.T, r *fstest.Run, create bool, filename stri
     return vfs, fh
 }

-// Open a file for read
-func rwHandleCreateReadOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
-    return rwHandleCreateFlags(t, r, true, "dir/file1", os.O_RDONLY)
-}
-
 // Open a file for write
 func rwHandleCreateWriteOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
-    return rwHandleCreateFlags(t, r, false, "file1", os.O_WRONLY|os.O_CREATE)
+    opt := DefaultOpt
+    opt.CacheMode = CacheModeFull
+    vfs := New(r.Fremote, &opt)
+
+    h, err := vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE, 0777)
+    require.NoError(t, err)
+    fh, ok := h.(*RWFileHandle)
+    require.True(t, ok)
+
+    return vfs, fh
 }

 // read data from the string

@@ -492,96 +494,6 @@ func TestRWFileHandleReleaseWrite(t *testing.T) {
     assert.True(t, fh.closed)
 }

-// check the size of the file through the open file (if not nil) and via stat
-func assertSize(t *testing.T, vfs *VFS, fh *RWFileHandle, filepath string, size int64) {
-    if fh != nil {
-        assert.Equal(t, size, fh.Size())
-    }
-    fi, err := vfs.Stat(filepath)
-    require.NoError(t, err)
-    assert.Equal(t, size, fi.Size())
-}
-
-func TestRWFileHandleSizeTruncateExisting(t *testing.T) {
-    r := fstest.NewRun(t)
-    vfs, fh := rwHandleCreateFlags(t, r, true, "dir/file1", os.O_WRONLY|os.O_TRUNC)
-    defer cleanup(t, r, vfs)
-
-    // check initial size after opening
-    assertSize(t, vfs, fh, "dir/file1", 0)
-
-    // write some bytes
-    n, err := fh.Write([]byte("hello"))
-    assert.NoError(t, err)
-    assert.Equal(t, 5, n)
-
-    // check size after writing
-    assertSize(t, vfs, fh, "dir/file1", 5)
-
-    // close
-    assert.NoError(t, fh.Close())
-
-    // check size after close
-    assertSize(t, vfs, nil, "dir/file1", 5)
-}
-
-func TestRWFileHandleSizeCreateExisting(t *testing.T) {
-    r := fstest.NewRun(t)
-    vfs, fh := rwHandleCreateFlags(t, r, true, "dir/file1", os.O_WRONLY|os.O_CREATE)
-    defer cleanup(t, r, vfs)
-
-    // check initial size after opening
-    assertSize(t, vfs, fh, "dir/file1", 16)
-
-    // write some bytes
-    n, err := fh.Write([]byte("hello"))
-    assert.NoError(t, err)
-    assert.Equal(t, 5, n)
-
-    // check size after writing
-    assertSize(t, vfs, fh, "dir/file1", 16)
-
-    // write some more bytes
-    n, err = fh.Write([]byte("helloHELLOhello"))
-    assert.NoError(t, err)
-    assert.Equal(t, 15, n)
-
-    // check size after writing
-    assertSize(t, vfs, fh, "dir/file1", 20)
-
-    // close
-    assert.NoError(t, fh.Close())
-
-    // check size after close
-    assertSize(t, vfs, nil, "dir/file1", 20)
-}
-
-func TestRWFileHandleSizeCreateNew(t *testing.T) {
-    r := fstest.NewRun(t)
-    vfs, fh := rwHandleCreateFlags(t, r, false, "file1", os.O_WRONLY|os.O_CREATE)
-    defer cleanup(t, r, vfs)
-
-    // check initial size after opening
-    assertSize(t, vfs, fh, "file1", 0)
-
-    // write some bytes
-    n, err := fh.Write([]byte("hello"))
-    assert.NoError(t, err)
-    assert.Equal(t, 5, n)
-
-    // check size after writing
-    assertSize(t, vfs, fh, "file1", 5)
-
-    // check size after writing
-    assertSize(t, vfs, fh, "file1", 5)
-
-    // close
-    assert.NoError(t, fh.Close())
-
-    // check size after close
-    assertSize(t, vfs, nil, "file1", 5)
-}
-
 func testRWFileHandleOpenTest(t *testing.T, vfs *VFS, test *openTest) {
     fileName := "open-test-file"
