1
0
mirror of https://github.com/rclone/rclone.git synced 2026-01-22 20:33:17 +00:00

Compare commits

...

10 Commits

Author SHA1 Message Date
Nick Craig-Wood
97ade36d8c sftp: add extra debugging if --dump headers is used 2021-07-31 11:52:34 +01:00
Nick Craig-Wood
6545755758 sftp: remove spurious error message on --sftp-disable-concurrent-reads 2021-07-31 11:04:45 +01:00
Nick Craig-Wood
c86a55c798 vfs: fix duplicates on rename - fixes #5469
Before this change, if there was an existing file being uploaded when
a file was renamed on top of it, then both would be uploaded. This
causes a duplicate in Google Drive as both files get uploaded at the
same time. This was triggered reliably by LibreOffice saving doc
files.

This fix removes any duplicates in the upload queue on rename.
2021-07-30 19:31:02 +01:00
Nick Craig-Wood
1d280081d4 Add Mariano Absatz (git) to contributors 2021-07-30 19:31:02 +01:00
Nick Craig-Wood
f48cb5985f Add Justin Winokur (Jwink3101) to contributors 2021-07-30 19:31:02 +01:00
Ivan Andreev
55e766f4e8 mountlib: restore daemon mode after #5415 2021-07-29 13:35:04 +03:00
Alex Chen
63a24255f8 onedrive: handle HTTP 400 better in PublicLink() (#5419) 2021-07-27 17:55:57 +08:00
Cnly
bc74f0621e http: fix serve http exits directly after starting 2021-07-25 14:06:43 +01:00
Mariano Absatz (git)
f39a08c9d7 clarification of the process for creating custom client_id 2021-07-24 09:19:48 +03:00
Justin Winokur (Jwink3101)
675548070d fs/operations: add rmdirs -v output - fixes #5464 2021-07-24 09:16:23 +03:00
11 changed files with 149 additions and 27 deletions

View File

@@ -1500,7 +1500,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return shouldRetry(ctx, resp, err)
})
if err != nil {
fmt.Println(err)
if resp != nil && resp.StatusCode == 400 && f.driveType != driveTypePersonal {
return "", errors.Errorf("%v (is making public links permitted by the org admin?)", err)
}
return "", err
}

View File

@@ -313,6 +313,13 @@ type Object struct {
sha1sum *string // Cached SHA1 checksum
}
// debugf calls fs.Debugf if --dump bodies or --dump headers is set
func (f *Fs) debugf(o interface{}, text string, args ...interface{}) {
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
fs.Debugf(o, text, args...)
}
}
// dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.
@@ -429,10 +436,6 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
sftp.UseConcurrentWrites(!f.opt.DisableConcurrentWrites),
)
if f.opt.DisableConcurrentReads { // FIXME
fs.Errorf(f, "Ignoring disable_concurrent_reads after library reversion - see #5197")
}
return sftp.NewClientPipe(pr, pw, opts...)
}
@@ -768,7 +771,9 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
if err != nil {
return nil, errors.Wrap(err, "NewFs")
}
f.debugf(f, "> Getwd")
cwd, err := c.sftpClient.Getwd()
f.debugf(f, "< Getwd: %q, err=%#v", cwd, err)
f.putSftpConnection(&c, nil)
if err != nil {
fs.Debugf(f, "Failed to read current directory - using relative paths: %v", err)
@@ -849,7 +854,9 @@ func (f *Fs) dirExists(ctx context.Context, dir string) (bool, error) {
if err != nil {
return false, errors.Wrap(err, "dirExists")
}
f.debugf(f, "> Stat dirExists: %q", dir)
info, err := c.sftpClient.Stat(dir)
f.debugf(f, "< Stat dirExists: %#v, err=%#v", info, err)
f.putSftpConnection(&c, err)
if err != nil {
if os.IsNotExist(err) {
@@ -889,7 +896,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
return nil, errors.Wrap(err, "List")
}
f.debugf(f, "> ReadDir: %q", sftpDir)
infos, err := c.sftpClient.ReadDir(sftpDir)
f.debugf(f, "< ReadDir: %#v, err=%#v", infos, err)
f.putSftpConnection(&c, err)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
@@ -980,7 +989,9 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
if err != nil {
return errors.Wrap(err, "mkdir")
}
f.debugf(f, "> Mkdir: %q", dirPath)
err = c.sftpClient.Mkdir(dirPath)
f.debugf(f, "< Mkdir: err=%#v", err)
f.putSftpConnection(&c, err)
if err != nil {
return errors.Wrapf(err, "mkdir %q failed", dirPath)
@@ -1011,7 +1022,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
if err != nil {
return errors.Wrap(err, "Rmdir")
}
f.debugf(f, "> Rmdir: %q", root)
err = c.sftpClient.RemoveDirectory(root)
f.debugf(f, "< Rmdir: err=%#v", err)
f.putSftpConnection(&c, err)
return err
}
@@ -1031,10 +1044,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, errors.Wrap(err, "Move")
}
err = c.sftpClient.Rename(
srcObj.path(),
path.Join(f.absRoot, remote),
)
srcPath, dstPath := srcObj.path(), path.Join(f.absRoot, remote)
f.debugf(f, "> Rename file: src=%q, dst=%q", srcPath, dstPath)
err = c.sftpClient.Rename(srcPath, dstPath)
f.debugf(f, "< Rename file: err=%#v", err)
f.putSftpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "Move Rename failed")
@@ -1083,10 +1096,12 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if err != nil {
return errors.Wrap(err, "DirMove")
}
f.debugf(f, "> Rename dir: src=%q, dst=%q", srcPath, dstPath)
err = c.sftpClient.Rename(
srcPath,
dstPath,
)
f.debugf(f, "< Rename dir: err=%#v", err)
f.putSftpConnection(&c, err)
if err != nil {
return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
@@ -1102,7 +1117,9 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
}
defer f.putSftpConnection(&c, err)
f.debugf(f, "> NewSession run")
session, err := c.sshClient.NewSession()
f.debugf(f, "< NewSession run: %#v, err=%#v", session, err)
if err != nil {
return nil, errors.Wrap(err, "run: get SFTP session")
}
@@ -1114,7 +1131,9 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
session.Stdout = &stdout
session.Stderr = &stderr
f.debugf(f, "> Run cmd: %q", cmd)
err = session.Run(cmd)
f.debugf(f, "< Run cmd: err=%#v", err)
if err != nil {
return nil, errors.Wrapf(err, "failed to run %q: %s", cmd, stderr.Bytes())
}
@@ -1261,7 +1280,9 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
if err != nil {
return "", errors.Wrap(err, "Hash get SFTP connection")
}
o.fs.debugf(o, "> NewSession hash")
session, err := c.sshClient.NewSession()
o.fs.debugf(o, "< NewSession hash: %#v, err=%#v", session, err)
o.fs.putSftpConnection(&c, err)
if err != nil {
return "", errors.Wrap(err, "Hash put SFTP connection")
@@ -1371,7 +1392,9 @@ func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err err
return nil, errors.Wrap(err, "stat")
}
absPath := path.Join(f.absRoot, remote)
f.debugf(f, "> Stat file: %q", absPath)
info, err = c.sftpClient.Stat(absPath)
f.debugf(f, "< Stat file: %#v, err=%#v", info, err)
f.putSftpConnection(&c, err)
return info, err
}
@@ -1403,7 +1426,9 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if err != nil {
return errors.Wrap(err, "SetModTime")
}
o.fs.debugf(o, "> Chtimes: %q, %v", o.path(), modTime)
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
o.fs.debugf(o, "< Chtimes: err=%#v", err)
o.fs.putSftpConnection(&c, err)
if err != nil {
return errors.Wrap(err, "SetModTime failed")
@@ -1491,7 +1516,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, errors.Wrap(err, "Open")
}
o.fs.debugf(o, "> Open read: %q", o.path())
sftpFile, err := c.sftpClient.Open(o.path())
o.fs.debugf(o, "< Open read: %#v, err=%#v", sftpFile, err)
o.fs.putSftpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
@@ -1530,7 +1557,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return errors.Wrap(err, "Update")
}
o.fs.debugf(o, "> OpenFile write: %q", o.path())
file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
o.fs.debugf(o, "< OpenFile write: %#v, err=%#v", file, err)
o.fs.putSftpConnection(&c, err)
if err != nil {
return errors.Wrap(err, "Update Create failed")
@@ -1542,7 +1571,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(src, "Failed to open new SSH connection for delete: %v", removeErr)
return
}
o.fs.debugf(o, "> Remove file: %q", o.path())
removeErr = c.sftpClient.Remove(o.path())
o.fs.debugf(o, "< Remove file: err=%#v", removeErr)
o.fs.putSftpConnection(&c, removeErr)
if removeErr != nil {
fs.Debugf(src, "Failed to remove: %v", removeErr)
@@ -1591,7 +1622,9 @@ func (o *Object) Remove(ctx context.Context) error {
if err != nil {
return errors.Wrap(err, "Remove")
}
o.fs.debugf(o, "> Remove: %q", o.path())
err = c.sftpClient.Remove(o.path())
o.fs.debugf(o, "< Remove: err=%#v", err)
o.fs.putSftpConnection(&c, err)
return err
}

View File

@@ -145,8 +145,8 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
VFSOpt: vfsflags.Opt,
}
err := mnt.Mount()
if err == nil {
daemonized, err := mnt.Mount()
if !daemonized && err == nil {
err = mnt.Wait()
}
if err != nil {
@@ -167,21 +167,21 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
}
// Mount the remote at mountpoint
func (m *MountPoint) Mount() (err error) {
func (m *MountPoint) Mount() (daemonized bool, err error) {
if err = m.CheckOverlap(); err != nil {
return err
return false, err
}
if err = m.CheckAllowings(); err != nil {
return err
return false, err
}
m.SetVolumeName(m.MountOpt.VolumeName)
// Start background task if --background is specified
// Start background task if --daemon is specified
if m.MountOpt.Daemon {
daemonized := startBackgroundMode()
daemonized = startBackgroundMode()
if daemonized {
return nil
return true, nil
}
}
@@ -189,9 +189,9 @@ func (m *MountPoint) Mount() (err error) {
m.ErrChan, m.UnmountFn, err = m.MountFn(m.VFS, m.MountPoint, &m.MountOpt)
if err != nil {
return errors.Wrap(err, "failed to mount FUSE fs")
return false, errors.Wrap(err, "failed to mount FUSE fs")
}
return nil
return false, nil
}
// CheckOverlap checks that root doesn't overlap with mountpoint

View File

@@ -270,7 +270,7 @@ func (vol *Volume) mount(id string) error {
return errors.New("volume filesystem is not ready")
}
if err := vol.mnt.Mount(); err != nil {
if _, err := vol.mnt.Mount(); err != nil {
return err
}
vol.mnt.MountedOn = time.Now()

View File

@@ -69,6 +69,7 @@ control the stats printing.
return err
}
s.Bind(router)
httplib.Wait()
return nil
})
},

View File

@@ -511,3 +511,5 @@ put them back in again.` >}}
* Michael Hanselmann <public@hansmi.ch>
* Chuan Zh <zhchuan7@gmail.com>
* Antoine GIRARD <antoine.girard@sapk.fr>
* Justin Winokur (Jwink3101) <Jwink3101@users.noreply.github.com>
* Mariano Absatz (git) <scm@baby.com.ar>

View File

@@ -128,7 +128,7 @@ Client ID and Key by following the steps below:
1. Open https://portal.azure.com/#blade/Microsoft_AAD_RegisteredApps/ApplicationsListBlade and then click `New registration`.
2. Enter a name for your app, choose account type `Accounts in any organizational directory (Any Azure AD directory - Multitenant) and personal Microsoft accounts (e.g. Skype, Xbox)`, select `Web` in `Redirect URI`, then type (do not copy and paste) `http://localhost:53682/` and click Register. Copy and keep the `Application (client) ID` under the app name for later use.
3. Under `manage` select `Certificates & secrets`, click `New client secret`. Copy and keep that secret for later use.
3. Under `manage` select `Certificates & secrets`, click `New client secret`. Enter a description (can be anything) and set `Expires` to 24 months. Copy and keep that secret _Value_ for later use (you _won't_ be able to see this value afterwards).
4. Under `manage` select `API permissions`, click `Add a permission` and select `Microsoft Graph` then select `delegated permissions`.
5. Search and select the following permissions: `Files.Read`, `Files.ReadWrite`, `Files.Read.All`, `Files.ReadWrite.All`, `offline_access`, `User.Read`. Once selected click `Add permissions` at the bottom.
@@ -582,3 +582,12 @@ Description: Due to a configuration change made by your administrator, or becaus
```
If you see the error above after enabling multi-factor authentication for your account, you can fix it by refreshing your OAuth refresh token. To do that, run `rclone config`, and choose to edit your OneDrive backend. Then, you don't need to actually make any changes until you reach this question: `Already have a token - refresh?`. For this question, answer `y` and go through the process to refresh your token, just like the first time the backend is configured. After this, rclone should work again for this backend.
#### Invalid request when making public links ####
On Sharepoint and OneDrive for Business, `rclone link` may return an "Invalid
request" error. A possible cause is that the organisation admin didn't allow
public links to be made for the organisation/sharepoint library. To fix the
permissions as an admin, take a look at the docs:
[1](https://docs.microsoft.com/en-us/sharepoint/turn-external-sharing-on-or-off),
[2](https://support.microsoft.com/en-us/office/set-up-and-manage-access-requests-94b26e0b-2822-49d4-929a-8455698654b3).

View File

@@ -1043,7 +1043,7 @@ func TryRmdir(ctx context.Context, f fs.Fs, dir string) error {
if SkipDestructive(ctx, fs.LogDirName(f, dir), "remove directory") {
return nil
}
fs.Debugf(fs.LogDirName(f, dir), "Removing directory")
fs.Infof(fs.LogDirName(f, dir), "Removing directory")
return f.Rmdir(ctx, dir)
}

View File

@@ -231,6 +231,11 @@ func (s *server) Serve() {
}
}
// Wait blocks while the server is serving requests
func (s *server) Wait() {
s.closing.Wait()
}
// Router returns the server base router
func (s *server) Router() chi.Router {
return s.baseRouter
@@ -291,6 +296,11 @@ func Restart() error {
return start()
}
// Wait blocks while the default http server is serving requests
func Wait() {
defaultServer.Wait()
}
// Start the default server
func start() error {
defaultServerMutex.Lock()

View File

@@ -276,13 +276,12 @@ func (wb *WriteBack) Add(id Handle, name string, modified bool, putFn PutFn) Han
return wbItem.id
}
// Remove should be called when a file should be removed from the
// _remove should be called when a file should be removed from the
// writeback queue. This cancels a writeback if there is one and
// doesn't return the item to the queue.
func (wb *WriteBack) Remove(id Handle) (found bool) {
wb.mu.Lock()
defer wb.mu.Unlock()
//
// This should be called with the lock held
func (wb *WriteBack) _remove(id Handle) (found bool) {
wbItem, found := wb.lookup[id]
if found {
fs.Debugf(wbItem.name, "vfs cache: cancelling writeback (uploading %v) %p item %d", wbItem.uploading, wbItem, wbItem.id)
@@ -299,6 +298,16 @@ func (wb *WriteBack) Remove(id Handle) (found bool) {
return found
}
// Remove should be called when a file should be removed from the
// writeback queue. This cancels a writeback if there is one and
// doesn't return the item to the queue.
func (wb *WriteBack) Remove(id Handle) (found bool) {
wb.mu.Lock()
defer wb.mu.Unlock()
return wb._remove(id)
}
// Rename should be called when a file might be uploading and it gains
// a new name. This will cancel the upload and put it back in the
// queue.
@@ -314,6 +323,15 @@ func (wb *WriteBack) Rename(id Handle, name string) {
// We are uploading already so cancel the upload
wb._cancelUpload(wbItem)
}
// Check to see if there are any uploads with the existing
// name and remove them
for existingID, existingItem := range wb.lookup {
if existingID != id && existingItem.name == name {
wb._remove(existingID)
}
}
wbItem.name = name
// Kick the timer on
wb.items._update(wbItem, wb._newExpiry())

View File

@@ -585,6 +585,53 @@ func TestWriteBackRename(t *testing.T) {
assert.Equal(t, wbItem.name, "three")
}
// TestWriteBackRenameDuplicates checks that if we rename an entry and
// make a duplicate, we remove the duplicate.
func TestWriteBackRenameDuplicates(t *testing.T) {
wb, cancel := newTestWriteBack(t)
defer cancel()
// add item "one"
pi1 := newPutItem(t)
id1 := wb.Add(0, "one", true, pi1.put)
wbItem1 := wb.lookup[id1]
checkOnHeap(t, wb, wbItem1)
checkInLookup(t, wb, wbItem1)
assert.Equal(t, wbItem1.name, "one")
<-pi1.started
checkNotOnHeap(t, wb, wbItem1)
checkInLookup(t, wb, wbItem1)
// add item "two"
pi2 := newPutItem(t)
id2 := wb.Add(0, "two", true, pi2.put)
wbItem2 := wb.lookup[id2]
checkOnHeap(t, wb, wbItem2)
checkInLookup(t, wb, wbItem2)
assert.Equal(t, wbItem2.name, "two")
<-pi2.started
checkNotOnHeap(t, wb, wbItem2)
checkInLookup(t, wb, wbItem2)
// rename "two" to "one"
wb.Rename(id2, "one")
// check "one" is cancelled and removed from heap and lookup
checkNotOnHeap(t, wb, wbItem1)
checkNotInLookup(t, wb, wbItem1)
assert.True(t, pi1.cancelled)
assert.Equal(t, wbItem1.name, "one")
// check "two" (now called "one"!) has been cancelled and will
// be retried
checkOnHeap(t, wb, wbItem2)
checkInLookup(t, wb, wbItem2)
assert.True(t, pi2.cancelled)
assert.Equal(t, wbItem2.name, "one")
}
func TestWriteBackCancelUpload(t *testing.T) {
wb, cancel := newTestWriteBack(t)
defer cancel()