mirror of https://github.com/rclone/rclone.git synced 2026-01-31 08:43:29 +00:00

Compare commits

...

16 Commits

Author SHA1 Message Date
Nick Craig-Wood
98d494411f s3: Attempt to fix auth problems #5468
Revert "s3: remove WebIdentityRoleProvider to fix crash on auth #5255"

This reverts commit e618ea83dd.
2021-08-11 11:47:24 +01:00
partev
bb6b44d199 DOC: "OS X" -> "macOS" 2021-08-10 10:12:30 +03:00
vinibali
88b35bc32d Update yandex.md
add mail subscription exception
2021-08-09 23:28:41 +03:00
Nathan Collins
c32d5dd1f3 fs: move with --ignore-existing will not delete skipped files - #5463 2021-08-01 17:46:45 +01:00
Greg Sadetsky
3d9da896d2 drive: fix instructions for auto config #5499 2021-08-01 15:17:07 +01:00
hota
839c20bb35 s3: add Wasabi's AP-Northeast endpoint info
* Wasabi starts to provide AP Northeast (Tokyo) endpoint for all customers, so add it to the list

Signed-off-by: lindwurm <lindwurm.q@gmail.com>
2021-08-01 14:56:52 +01:00
Nick Craig-Wood
7c58148840 Start v1.57.0-DEV development 2021-08-01 13:43:36 +01:00
Nick Craig-Wood
6545755758 sftp: remove spurious error message on --sftp-disable-concurrent-reads 2021-07-31 11:04:45 +01:00
Nick Craig-Wood
c86a55c798 vfs: fix duplicates on rename - fixes #5469
Before this change, if there was an existing file being uploaded when
a file was renamed on top of it, then both would be uploaded. This
causes a duplicate in Google Drive as both files get uploaded at the
same time. This was triggered reliably by LibreOffice saving doc
files.

This fix removes any duplicates in the upload queue on rename.
2021-07-30 19:31:02 +01:00
Nick Craig-Wood
1d280081d4 Add Mariano Absatz (git) to contributors 2021-07-30 19:31:02 +01:00
Nick Craig-Wood
f48cb5985f Add Justin Winokur (Jwink3101) to contributors 2021-07-30 19:31:02 +01:00
Ivan Andreev
55e766f4e8 mountlib: restore daemon mode after #5415 2021-07-29 13:35:04 +03:00
Alex Chen
63a24255f8 onedrive: handle HTTP 400 better in PublicLink() (#5419) 2021-07-27 17:55:57 +08:00
Cnly
bc74f0621e http: fix serve http exits directly after starting 2021-07-25 14:06:43 +01:00
Mariano Absatz (git)
f39a08c9d7 clarification of the process for creating custom client_id 2021-07-24 09:19:48 +03:00
Justin Winokur (Jwink3101)
675548070d fs/operations: add rmdirs -v output - fixes #5464 2021-07-24 09:16:23 +03:00
23 changed files with 232 additions and 34 deletions

View File

@@ -1 +1 @@
-v1.56.0
+v1.57.0

View File

@@ -1500,7 +1500,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		fmt.Println(err)
+		if resp != nil && resp.StatusCode == 400 && f.driveType != driveTypePersonal {
+			return "", errors.Errorf("%v (is making public links permitted by the org admin?)", err)
+		}
 		return "", err
 	}

View File

@@ -26,6 +26,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws/corehandlers"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
 	"github.com/aws/aws-sdk-go/aws/defaults"
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 	"github.com/aws/aws-sdk-go/aws/endpoints"
@@ -629,6 +630,10 @@ func init() {
 			Value:    "s3.eu-central-1.wasabisys.com",
 			Help:     "Wasabi EU Central endpoint",
 			Provider: "Wasabi",
+		}, {
+			Value:    "s3.ap-northeast-1.wasabisys.com",
+			Help:     "Wasabi AP Northeast endpoint",
+			Provider: "Wasabi",
 		}},
 	}, {
 		Name: "location_constraint",
@@ -1541,6 +1546,11 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
 			}),
 			ExpiryWindow: 3 * time.Minute,
 		},
+		// Pick up IAM role if we are in EKS
+		&stscreds.WebIdentityRoleProvider{
+			ExpiryWindow: 3 * time.Minute,
+		},
 	}
 	cred := credentials.NewChainCredentials(providers)

View File

@@ -429,10 +429,6 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
 		sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
 		sftp.UseConcurrentWrites(!f.opt.DisableConcurrentWrites),
 	)
-	if f.opt.DisableConcurrentReads { // FIXME
-		fs.Errorf(f, "Ignoring disable_concurrent_reads after library reversion - see #5197")
-	}
 	return sftp.NewClientPipe(pr, pw, opts...)
 }

View File

@@ -145,8 +145,8 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
 		VFSOpt: vfsflags.Opt,
 	}
-	err := mnt.Mount()
-	if err == nil {
+	daemonized, err := mnt.Mount()
+	if !daemonized && err == nil {
 		err = mnt.Wait()
 	}
 	if err != nil {
@@ -167,21 +167,21 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
 }
 // Mount the remote at mountpoint
-func (m *MountPoint) Mount() (err error) {
+func (m *MountPoint) Mount() (daemonized bool, err error) {
 	if err = m.CheckOverlap(); err != nil {
-		return err
+		return false, err
 	}
 	if err = m.CheckAllowings(); err != nil {
-		return err
+		return false, err
 	}
 	m.SetVolumeName(m.MountOpt.VolumeName)
-	// Start background task if --background is specified
+	// Start background task if --daemon is specified
 	if m.MountOpt.Daemon {
-		daemonized := startBackgroundMode()
+		daemonized = startBackgroundMode()
 		if daemonized {
-			return nil
+			return true, nil
 		}
 	}
@@ -189,9 +189,9 @@ func (m *MountPoint) Mount() (err error) {
 	m.ErrChan, m.UnmountFn, err = m.MountFn(m.VFS, m.MountPoint, &m.MountOpt)
 	if err != nil {
-		return errors.Wrap(err, "failed to mount FUSE fs")
+		return false, errors.Wrap(err, "failed to mount FUSE fs")
 	}
-	return nil
+	return false, nil
 }
 // CheckOverlap checks that root doesn't overlap with mountpoint

View File

@@ -270,7 +270,7 @@ func (vol *Volume) mount(id string) error {
 		return errors.New("volume filesystem is not ready")
 	}
-	if err := vol.mnt.Mount(); err != nil {
+	if _, err := vol.mnt.Mount(); err != nil {
 		return err
 	}
 	vol.mnt.MountedOn = time.Now()

View File

@@ -69,6 +69,7 @@ control the stats printing.
 			return err
 		}
 		s.Bind(router)
+		httplib.Wait()
 		return nil
 	})
 },

View File

@@ -511,3 +511,5 @@ put them back in again.` >}}
 * Michael Hanselmann <public@hansmi.ch>
 * Chuan Zh <zhchuan7@gmail.com>
 * Antoine GIRARD <antoine.girard@sapk.fr>
+* Justin Winokur (Jwink3101) <Jwink3101@users.noreply.github.com>
+* Mariano Absatz (git) <scm@baby.com.ar>

View File

@@ -18,7 +18,7 @@ FUSE.
 First set up your remote using `rclone config`. Check it works with `rclone ls` etc.
-On Linux and OSX, you can either run mount in foreground mode or background (daemon) mode.
+On Linux and macOS, you can either run mount in foreground mode or background (daemon) mode.
 Mount runs in foreground mode by default, use the `--daemon` flag to specify background mode.
 You can only run mount in foreground mode on Windows.
@@ -47,7 +47,7 @@ When running in background mode the user will have to stop the mount manually:
     # Linux
     fusermount -u /path/to/local/mount
-    # OS X
+    # macOS
     umount /path/to/local/mount
 The umount operation can fail, for example when the mountpoint is busy.
@@ -238,7 +238,7 @@ Hubic) do not support the concept of empty directories, so empty
 directories will have a tendency to disappear once they fall out of
 the directory cache.
-Only supported on Linux, FreeBSD, OS X and Windows at the moment.
+Only supported on Linux, FreeBSD, macOS and Windows at the moment.
 ## rclone mount vs rclone sync/copy
@@ -602,8 +602,8 @@ rclone mount remote:path /path/to/mountpoint [flags]
       --no-checksum        Don't compare checksums on up/download.
       --no-modtime         Don't read/write the modification time (can speed things up).
       --no-seek            Don't allow seeking in files.
-      --noappledouble      Ignore Apple Double (._) and .DS_Store files. Supported on OSX only. (default true)
-      --noapplexattr       Ignore all "com.apple.*" extended attributes. Supported on OSX only.
+      --noappledouble      Ignore Apple Double (._) and .DS_Store files. Supported on macOS only. (default true)
+      --noapplexattr       Ignore all "com.apple.*" extended attributes. Supported on macOS only.
   -o, --option stringArray Option for libfuse/WinFsp. Repeat if required.
       --poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
       --read-only          Mount read-only.
@@ -621,7 +621,7 @@ rclone mount remote:path /path/to/mountpoint [flags]
       --vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
       --vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
       --vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
-      --volname string     Set the volume name. Supported on Windows and OSX only.
+      --volname string     Set the volume name. Supported on Windows and macOS only.
       --write-back-cache   Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used. Not supported on Windows.
 ```

View File

@@ -910,6 +910,10 @@ While this isn't a generally recommended option, it can be useful
 in cases where your files change due to encryption. However, it cannot
 correct partial transfers in case a transfer was interrupted.
+When performing a `move`/`moveto` command, this flag will leave skipped
+files in the source location unchanged when a file with the same name
+exists on the destination.
 ### --ignore-size ###
 Normally rclone will look at modification time and size of files to
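
As an illustration of the documented behaviour (paths and remote name are hypothetical), a move such as

    rclone move --ignore-existing /path/to/src remote:dst

now transfers only files missing from the destination, and files skipped because they already exist on the destination stay in the source rather than being deleted.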

View File

@@ -258,7 +258,7 @@ client_secret> # Can be left blank
 scope> # Select your scope, 1 for example
 root_folder_id> # Can be left blank
 service_account_file> /home/foo/myJSONfile.json # This is where the JSON file goes!
-y/n> # Auto config, y
+y/n> # Auto config, n
 ```

View File

@@ -128,7 +128,7 @@ Client ID and Key by following the steps below:
 1. Open https://portal.azure.com/#blade/Microsoft_AAD_RegisteredApps/ApplicationsListBlade and then click `New registration`.
 2. Enter a name for your app, choose account type `Accounts in any organizational directory (Any Azure AD directory - Multitenant) and personal Microsoft accounts (e.g. Skype, Xbox)`, select `Web` in `Redirect URI`, then type (do not copy and paste) `http://localhost:53682/` and click Register. Copy and keep the `Application (client) ID` under the app name for later use.
-3. Under `manage` select `Certificates & secrets`, click `New client secret`. Copy and keep that secret for later use.
+3. Under `manage` select `Certificates & secrets`, click `New client secret`. Enter a description (can be anything) and set `Expires` to 24 months. Copy and keep that secret _Value_ for later use (you _won't_ be able to see this value afterwards).
 4. Under `manage` select `API permissions`, click `Add a permission` and select `Microsoft Graph` then select `delegated permissions`.
 5. Search and select the following permissions: `Files.Read`, `Files.ReadWrite`, `Files.Read.All`, `Files.ReadWrite.All`, `offline_access`, `User.Read`. Once selected click `Add permissions` at the bottom.
@@ -582,3 +582,12 @@ Description: Due to a configuration change made by your administrator, or becaus
 ```
 If you see the error above after enabling multi-factor authentication for your account, you can fix it by refreshing your OAuth refresh token. To do that, run `rclone config`, and choose to edit your OneDrive backend. Then, you don't need to actually make any changes until you reach this question: `Already have a token - refresh?`. For this question, answer `y` and go through the process to refresh your token, just like the first time the backend is configured. After this, rclone should work again for this backend.
+
+#### Invalid request when making public links ####
+On Sharepoint and OneDrive for Business, `rclone link` may return an "Invalid
+request" error. A possible cause is that the organisation admin didn't allow
+public links to be made for the organisation/sharepoint library. To fix the
+permissions as an admin, take a look at the docs:
+[1](https://docs.microsoft.com/en-us/sharepoint/turn-external-sharing-on-or-off),
+[2](https://support.microsoft.com/en-us/office/set-up-and-manage-access-requests-94b26e0b-2822-49d4-929a-8455698654b3).
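
For reference, the command in question takes the form below (remote and path are placeholders); on SharePoint and OneDrive for Business this is the call that can fail with the "Invalid request" error when the admin has disabled external sharing:

    rclone link remote:path/to/file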

View File

@@ -1048,6 +1048,8 @@ Required when using an S3 clone.
         - Wasabi US West endpoint
     - "s3.eu-central-1.wasabisys.com"
         - Wasabi EU Central endpoint
+    - "s3.ap-northeast-1.wasabisys.com"
+        - Wasabi AP Northeast endpoint
 #### --s3-location-constraint
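
A minimal remote definition using the new endpoint might look like this (remote name and keys are placeholders):

    [wasabi-tokyo]
    type = s3
    provider = Wasabi
    access_key_id = YOUR_ACCESS_KEY
    secret_access_key = YOUR_SECRET_KEY
    endpoint = s3.ap-northeast-1.wasabisys.com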

View File

@@ -124,6 +124,12 @@ to twice the max size of file in GiB should be enough, so if you want
 to upload a 30 GiB file set a timeout of `2 * 30 = 60m`, that is
 `--timeout 60m`.
+Having a Yandex Mail account is mandatory to use the Yandex.Disk subscription.
+Token generation will work without a mail account, but Rclone won't be able to complete any actions.
+```
+[403 - DiskUnsupportedUserAccountTypeError] User account type is not supported.
+```
 {{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/yandex/yandex.go then run make backenddocs" >}}
 ### Standard Options

View File

@@ -1 +1 @@
-v1.56.0
+v1.57.0

View File

@@ -1043,7 +1043,7 @@ func TryRmdir(ctx context.Context, f fs.Fs, dir string) error {
 	if SkipDestructive(ctx, fs.LogDirName(f, dir), "remove directory") {
 		return nil
 	}
-	fs.Debugf(fs.LogDirName(f, dir), "Removing directory")
+	fs.Infof(fs.LogDirName(f, dir), "Removing directory")
 	return f.Rmdir(ctx, dir)
 }
@@ -1811,7 +1811,11 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
 	} else {
 		tr := accounting.Stats(ctx).NewCheckingTransfer(srcObj)
 		if !cp {
-			err = DeleteFile(ctx, srcObj)
+			if ci.IgnoreExisting {
+				fs.Debugf(srcObj, "Not removing source file as destination file exists and --ignore-existing is set")
+			} else {
+				err = DeleteFile(ctx, srcObj)
+			}
 		}
 		tr.Done(ctx, err)
 	}

View File

@@ -811,6 +811,32 @@ func TestMoveFile(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote, file2)
 }

+func TestMoveFileWithIgnoreExisting(t *testing.T) {
+	ctx := context.Background()
+	ctx, ci := fs.AddConfig(ctx)
+	r := fstest.NewRun(t)
+	defer r.Finalise()
+	file1 := r.WriteFile("file1", "file1 contents", t1)
+	fstest.CheckItems(t, r.Flocal, file1)
+
+	ci.IgnoreExisting = true
+
+	err := operations.MoveFile(ctx, r.Fremote, r.Flocal, file1.Path, file1.Path)
+	require.NoError(t, err)
+	fstest.CheckItems(t, r.Flocal)
+	fstest.CheckItems(t, r.Fremote, file1)
+
+	// Recreate file with updated content
+	file1b := r.WriteFile("file1", "file1 modified", t2)
+	fstest.CheckItems(t, r.Flocal, file1b)
+
+	// Ensure modified file did not transfer and was not deleted
+	err = operations.MoveFile(ctx, r.Fremote, r.Flocal, file1.Path, file1b.Path)
+	require.NoError(t, err)
+	fstest.CheckItems(t, r.Flocal, file1b)
+	fstest.CheckItems(t, r.Fremote, file1)
+}
+
 func TestCaseInsensitiveMoveFile(t *testing.T) {
 	ctx := context.Background()
 	r := fstest.NewRun(t)

View File

@@ -354,6 +354,8 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.W
 			// Delete src if no error on copy
 			if operations.SameObject(src, pair.Dst) {
 				fs.Logf(src, "Not removing source file as it is the same file as the destination")
+			} else if s.ci.IgnoreExisting {
+				fs.Debugf(src, "Not removing source file as destination file exists and --ignore-existing is set")
 			} else {
 				s.processError(operations.DeleteFile(s.ctx, src))
 			}

View File

@@ -1342,6 +1342,65 @@ func TestMoveWithoutDeleteEmptySrcDirs(t *testing.T) {
 	fstest.CheckItems(t, r.Fremote, file1, file2)
 }

+func TestMoveWithIgnoreExisting(t *testing.T) {
+	ctx := context.Background()
+	ctx, ci := fs.AddConfig(ctx)
+	r := fstest.NewRun(t)
+	defer r.Finalise()
+	file1 := r.WriteFile("existing", "potato", t1)
+	file2 := r.WriteFile("existing-b", "tomato", t1)
+
+	ci.IgnoreExisting = true
+
+	accounting.GlobalStats().ResetCounters()
+	err := MoveDir(ctx, r.Fremote, r.Flocal, false, false)
+	require.NoError(t, err)
+	fstest.CheckListingWithPrecision(
+		t,
+		r.Flocal,
+		[]fstest.Item{},
+		[]string{},
+		fs.GetModifyWindow(ctx, r.Flocal),
+	)
+	fstest.CheckListingWithPrecision(
+		t,
+		r.Fremote,
+		[]fstest.Item{
+			file1,
+			file2,
+		},
+		[]string{},
+		fs.GetModifyWindow(ctx, r.Fremote),
+	)
+
+	// Recreate first file with modified content
+	file1b := r.WriteFile("existing", "newpotatoes", t2)
+	accounting.GlobalStats().ResetCounters()
+	err = MoveDir(ctx, r.Fremote, r.Flocal, false, false)
+	require.NoError(t, err)
+	// Source items should still exist in modified state
+	fstest.CheckListingWithPrecision(
+		t,
+		r.Flocal,
+		[]fstest.Item{
+			file1b,
+		},
+		[]string{},
+		fs.GetModifyWindow(ctx, r.Flocal),
+	)
+	// Dest items should not have changed
+	fstest.CheckListingWithPrecision(
+		t,
+		r.Fremote,
+		[]fstest.Item{
+			file1,
+			file2,
+		},
+		[]string{},
+		fs.GetModifyWindow(ctx, r.Fremote),
+	)
+}
+
 // Test a server-side move if possible, or the backup path if not
 func TestServerSideMove(t *testing.T) {
 	ctx := context.Background()

View File

@@ -1,4 +1,4 @@
 package fs

 // Version of rclone
-var Version = "v1.56.0-DEV"
+var Version = "v1.57.0-DEV"

View File

@@ -231,6 +231,11 @@ func (s *server) Serve() {
 	}
 }

+// Wait blocks while the server is serving requests
+func (s *server) Wait() {
+	s.closing.Wait()
+}
+
 // Router returns the server base router
 func (s *server) Router() chi.Router {
 	return s.baseRouter
@@ -291,6 +296,11 @@ func Restart() error {
 	return start()
 }

+// Wait blocks while the default http server is serving requests
+func Wait() {
+	defaultServer.Wait()
+}
+
 // Start the default server
 func start() error {
 	defaultServerMutex.Lock()
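
To see how the two Wait functions above fix `serve http` exiting straight after starting: a minimal sketch of the pattern, assuming `closing` is a `sync.WaitGroup` held while the listener runs, as the diff suggests (the real httplib server carries more state):

```go
package httplib

import (
	"net/http"
	"sync"
)

// server is a trimmed stand-in for httplib's server type.
type server struct {
	closing    sync.WaitGroup // held while the HTTP listener is running
	httpServer *http.Server
}

// Serve starts the listener in the background and marks the server
// busy on the WaitGroup.
func (s *server) Serve() {
	s.closing.Add(1)
	go func() {
		defer s.closing.Done() // released when the listener stops
		_ = s.httpServer.ListenAndServe()
	}()
}

// Wait blocks until the listener has stopped, which is what lets the
// serve http command call httplib.Wait() after Bind instead of
// returning (and exiting the process) immediately.
func (s *server) Wait() {
	s.closing.Wait()
}
```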

View File

@@ -276,13 +276,12 @@ func (wb *WriteBack) Add(id Handle, name string, modified bool, putFn PutFn) Han
 	return wbItem.id
 }

-// Remove should be called when a file should be removed from the
+// _remove should be called when a file should be removed from the
 // writeback queue. This cancels a writeback if there is one and
 // doesn't return the item to the queue.
-func (wb *WriteBack) Remove(id Handle) (found bool) {
-	wb.mu.Lock()
-	defer wb.mu.Unlock()
+//
+// This should be called with the lock held
+func (wb *WriteBack) _remove(id Handle) (found bool) {
 	wbItem, found := wb.lookup[id]
 	if found {
 		fs.Debugf(wbItem.name, "vfs cache: cancelling writeback (uploading %v) %p item %d", wbItem.uploading, wbItem, wbItem.id)
@@ -299,6 +298,16 @@ func (wb *WriteBack) Remove(id Handle) (found bool) {
 	return found
 }

+// Remove should be called when a file should be removed from the
+// writeback queue. This cancels a writeback if there is one and
+// doesn't return the item to the queue.
+func (wb *WriteBack) Remove(id Handle) (found bool) {
+	wb.mu.Lock()
+	defer wb.mu.Unlock()
+	return wb._remove(id)
+}
+
 // Rename should be called when a file might be uploading and it gains
 // a new name. This will cancel the upload and put it back in the
 // queue.
@@ -314,6 +323,15 @@ func (wb *WriteBack) Rename(id Handle, name string) {
 		// We are uploading already so cancel the upload
 		wb._cancelUpload(wbItem)
 	}
+
+	// Check to see if there are any uploads with the existing
+	// name and remove them
+	for existingID, existingItem := range wb.lookup {
+		if existingID != id && existingItem.name == name {
+			wb._remove(existingID)
+		}
+	}
+
 	wbItem.name = name
 	// Kick the timer on
 	wb.items._update(wbItem, wb._newExpiry())

View File

@@ -585,6 +585,53 @@ func TestWriteBackRename(t *testing.T) {
 	assert.Equal(t, wbItem.name, "three")
 }

+// TestWriteBackRenameDuplicates checks that if we rename an entry and
+// make a duplicate, we remove the duplicate.
+func TestWriteBackRenameDuplicates(t *testing.T) {
+	wb, cancel := newTestWriteBack(t)
+	defer cancel()
+
+	// add item "one"
+	pi1 := newPutItem(t)
+	id1 := wb.Add(0, "one", true, pi1.put)
+	wbItem1 := wb.lookup[id1]
+	checkOnHeap(t, wb, wbItem1)
+	checkInLookup(t, wb, wbItem1)
+	assert.Equal(t, wbItem1.name, "one")
+	<-pi1.started
+	checkNotOnHeap(t, wb, wbItem1)
+	checkInLookup(t, wb, wbItem1)
+
+	// add item "two"
+	pi2 := newPutItem(t)
+	id2 := wb.Add(0, "two", true, pi2.put)
+	wbItem2 := wb.lookup[id2]
+	checkOnHeap(t, wb, wbItem2)
+	checkInLookup(t, wb, wbItem2)
+	assert.Equal(t, wbItem2.name, "two")
+	<-pi2.started
+	checkNotOnHeap(t, wb, wbItem2)
+	checkInLookup(t, wb, wbItem2)
+
+	// rename "two" to "one"
+	wb.Rename(id2, "one")
+
+	// check "one" is cancelled and removed from heap and lookup
+	checkNotOnHeap(t, wb, wbItem1)
+	checkNotInLookup(t, wb, wbItem1)
+	assert.True(t, pi1.cancelled)
+	assert.Equal(t, wbItem1.name, "one")
+
+	// check "two" (now called "one"!) has been cancelled and will
+	// be retried
+	checkOnHeap(t, wb, wbItem2)
+	checkInLookup(t, wb, wbItem2)
+	assert.True(t, pi2.cancelled)
+	assert.Equal(t, wbItem2.name, "one")
+}
+
 func TestWriteBackCancelUpload(t *testing.T) {
 	wb, cancel := newTestWriteBack(t)
 	defer cancel()