mirror of https://github.com/rclone/rclone.git synced 2026-01-19 17:03:57 +00:00

Compare commits


75 Commits

Author SHA1 Message Date
Nick Craig-Wood
4dfcf6899d Version v1.56.2 2021-10-01 18:07:47 +01:00
Nolan Woods
81b3075e9d lib/http: Add auth to http service
Fixes https://github.com/rclone/rclone/issues/5620
2021-10-01 17:19:02 +01:00
Ivan Andreev
f988773230 ftp: fix deadlock after failed update when concurrency=1 2021-10-01 17:17:10 +01:00
Nick Craig-Wood
e75d57d638 Start v1.56.2-DEV development 2021-09-23 17:22:53 +01:00
Herby Gillot
0e26fda001 build: update golang.org/x/sys to fix crash on macOS when compiled with go1.17
Updates the golang.org/x/sys module to prevent a panic on startup on macOS when built with Go 1.17

This is for the stable branch only

Fixes: #5611
2021-09-23 17:22:10 +01:00
Nick Craig-Wood
86f13fa46b Version v1.56.1 2021-09-19 12:15:09 +01:00
Nick Craig-Wood
b9cf451177 Start v1.56.1-DEV development 2021-09-19 12:03:09 +01:00
Fred
b64258d92e seafile: fix 2fa state machine 2021-09-19 11:29:42 +01:00
x0b
7308100695 build: update Go to 1.16 and NDK to 22b for android/any 2021-09-19 11:29:11 +01:00
Ivan Andreev
9c27e080b3 build: apply gofmt from golang 1.17 2021-09-19 11:28:40 +01:00
Greg Sadetsky
21d84df81b docs/s3: fix typo in s3 documentation (#5515) 2021-09-18 12:23:30 +01:00
Nick Craig-Wood
3f17c729f1 pcloud: try harder to delete a failed upload
This fixes the integration tests when testing errored uploads
2021-09-18 12:22:15 +01:00
Nick Craig-Wood
d77f594ee7 pcloud: return an early error when Put is called with an unknown size
This stops the 10 minute pause in the integration tests
2021-09-18 12:21:58 +01:00
albertony
478434ffef vfs: fix issue where empty dirs would build up in cache meta dir 2021-09-18 12:20:47 +01:00
negative0
df52896a0e rc: fix speed does not update in core/stats 2021-09-18 12:20:16 +01:00
yedamo
29a99205ec selfupdate: fix --quiet option, not quite quiet
Fixes #5505
2021-09-18 12:19:34 +01:00
Greg Sadetsky
693f674f39 docs/drive: Fix lsf example without drive-impersonate (#5504) 2021-09-18 12:19:17 +01:00
Greg Sadetsky
a506373ca1 drive: fix instructions for auto config #5499 2021-09-18 12:18:11 +01:00
hota
a8ba15b90a s3: add Wasabi's AP-Northeast endpoint info
* Wasabi starts to provide AP Northeast (Tokyo) endpoint for all customers, so add it to the list

Signed-off-by: lindwurm <lindwurm.q@gmail.com>
2021-09-18 12:17:56 +01:00
Nick Craig-Wood
b26308c427 sftp: remove spurious error message on --sftp-disable-concurrent-reads 2021-08-31 13:27:59 +01:00
Nick Craig-Wood
03bcf81c5e sugarsync: fix initial connection after config re-arrangement - Fixes #5525
In this commit the config system was re-arranged

    94dbfa4ea fs: change Config callback into state based callback #3455

This passed the password as a temporary config parameter but forgot to
reveal it in the API call.
2021-08-14 12:53:51 +01:00
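For background, rclone stores credentials obscured in its config; anything passed through the config system has to be revealed again before it goes into an API call, which is the step that was missed here. A minimal illustration of that round trip using rclone's obscure package (the surrounding program is just an example, not the sugarsync backend code):

```
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/obscure"
)

func main() {
	// Credentials are stored obscured in the config file...
	obscured := obscure.MustObscure("secret")
	// ...and must be revealed again before being used in an API call.
	// Forgetting this reveal step is the class of bug fixed above.
	fmt.Println(obscure.MustReveal(obscured))
}
```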
Nick Craig-Wood
8c1d4f17a8 accounting: fix maximum bwlimit by scaling max token bucket size
Before this fix, on Windows, the --bwlimit would max out at 2.5Gbps
even when set to 10 Gbps.

This turned out to be because of the maximum token bucket size.

This fix scales up the token bucket size linearly above a bwlimit of
2Gbps.

Fixes #5507
2021-08-13 16:56:08 +01:00
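A minimal sketch of the scaling idea (the constants and function are illustrative, not rclone's accounting internals): keep the default bucket capacity below the threshold, and grow it linearly with the limit above it so the bucket itself never caps throughput.

```
package main

import "fmt"

const (
	defaultBucketSize = 100 * 1024 * 1024 // illustrative base capacity in bytes
	scaleThreshold    = 250 * 1024 * 1024 // ~2 Gbps expressed in bytes/s
)

// bucketSize returns a token bucket capacity for a bandwidth limit given
// in bytes per second, scaling up linearly above the threshold.
func bucketSize(bwlimit int64) int64 {
	if bwlimit <= scaleThreshold {
		return defaultBucketSize
	}
	return defaultBucketSize * bwlimit / scaleThreshold
}

func main() {
	tenGbps := int64(10 * 1024 * 1024 * 1024 / 8) // 10 Gbps in bytes/s
	fmt.Println(bucketSize(tenGbps))              // capacity grows ~5x
}
```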
Nick Craig-Wood
e87de7c7e3 vfs: fix crash when truncating a just uploaded object - Fixes #5522 2021-08-11 11:55:45 +01:00
Ivan Andreev
7a31ef783a mountlib: restore daemon mode after #5415 2021-07-30 19:32:35 +01:00
Nick Craig-Wood
d0de426500 vfs: fix duplicates on rename - fixes #5469
Before this change, if there was an existing file being uploaded when
a file was renamed on top of it, then both would be uploaded. This
caused a duplicate in Google Drive as both files got uploaded at the
same time. This was triggered reliably by LibreOffice saving doc
files.

This fix removes any duplicates in the upload queue on rename.
2021-07-28 16:45:26 +01:00
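An illustrative sketch of the dedup-on-rename idea (the queue type and fields are hypothetical, not the actual vfs code): when a rename lands on a path that already has a pending upload, the stale entry is dropped so only one upload survives.

```
package main

import "fmt"

type upload struct{ path string }

// uploadQueue is a hypothetical stand-in for the vfs upload queue,
// keyed by remote path.
type uploadQueue struct {
	pending map[string]*upload
}

// rename moves a queued upload from oldPath to newPath, discarding any
// upload already queued for newPath so both don't run and create
// duplicates on backends like Google Drive.
func (q *uploadQueue) rename(oldPath, newPath string) {
	delete(q.pending, newPath) // drop the stale upload for the target
	if up, ok := q.pending[oldPath]; ok {
		delete(q.pending, oldPath)
		up.path = newPath
		q.pending[newPath] = up
	}
}

func main() {
	q := &uploadQueue{pending: map[string]*upload{
		"doc.odt":  {path: "doc.odt"},
		"doc.odt~": {path: "doc.odt~"},
	}}
	q.rename("doc.odt~", "doc.odt") // LibreOffice-style save via rename
	fmt.Println(len(q.pending))     // 1 - only one upload remains
}
```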
Alex Chen
34f89043af onedrive: handle HTTP 400 better in PublicLink() (#5419) 2021-07-28 16:45:04 +01:00
Cnly
3ba001f8d7 http: fix serve http exits directly after starting 2021-07-28 16:44:42 +01:00
Mariano Absatz (git)
6bbf46961c clarification of the process for creating custom client_id 2021-07-28 16:44:29 +01:00
Nick Craig-Wood
37ff05a5fa Version v1.56.0 2021-07-20 19:45:41 +01:00
Nick Craig-Wood
c67c1ab4ee test makefiles: fix documentation so it doesn't have HTML in 2021-07-20 19:37:09 +01:00
Nick Craig-Wood
76f8095bc5 hdfs: fix documentation so it doesn't have HTML in 2021-07-20 19:36:30 +01:00
Nick Craig-Wood
f646cd0a2a librclone: add missing sync/* rc methods
See: https://forum.rclone.org/t/missing-directory-copy-move-methods-in-librclone/24503
2021-07-20 16:59:02 +01:00
Nick Craig-Wood
d38f6bb0ab gphotos: fix read only scope not being used properly
Before this change the read only scope was being ignored and rclone
was asking for a read-write scope.

https://forum.rclone.org/t/google-photos-copy-sync-errors/25153
2021-07-20 16:57:55 +01:00
Nick Craig-Wood
11d86c74b2 docs: expand contents and make docs full screen 2021-07-20 16:53:21 +01:00
Nick Craig-Wood
feb6046a8a docs: add table of contents to every page 2021-07-20 16:53:21 +01:00
Nick Craig-Wood
807102ada2 drive: fix config system overwriting team drive ID - fixes #5454 2021-07-20 16:51:59 +01:00
Nick Craig-Wood
770b3496a1 config: fix in memory config not saving on the fly backend config
Before this fix, saving a :backend config gave the error

    Can't save config "token" = "XXX" for on the fly backend ":backend"

Even when using the in-memory config `--config ""`

This fixes the problem by
- always using the in memory config if it is configured
- moving the check for a :backend config save to the file config backend

It also removes the contents of the config items being saved from the
log, which prevents confidential tokens from being logged.

Fixes #5451
2021-07-20 12:09:38 +01:00
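A rough sketch of the shape of this fix (the interfaces and names are hypothetical, not rclone's config package): writes go to the in-memory storage whenever it is active, and only the file-backed storage rejects on-the-fly ":backend" sections.

```
package main

import (
	"fmt"
	"strings"
)

type storage interface {
	setValue(section, key, value string) error
}

// memoryStorage models the in-memory config selected by --config "".
type memoryStorage struct{ data map[string]map[string]string }

func (m *memoryStorage) setValue(section, key, value string) error {
	if m.data[section] == nil {
		m.data[section] = map[string]string{}
	}
	m.data[section][key] = value // on-the-fly backends are fine here
	return nil
}

// fileStorage models the file-backed config; the ":backend" check
// lives only here after the fix.
type fileStorage struct{ path string }

func (f *fileStorage) setValue(section, key, value string) error {
	if strings.HasPrefix(section, ":") {
		// The value is deliberately left out of the message so
		// confidential tokens are not logged.
		return fmt.Errorf("can't save config %q for on the fly backend %q", key, section)
	}
	return nil // writing to f.path omitted in this sketch
}

func main() {
	var s storage = &memoryStorage{data: map[string]map[string]string{}}
	fmt.Println(s.setValue(":backend", "token", "XXX")) // <nil> - succeeds in memory
}
```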
buengese
da36ce08e4 docs/jottacloud: add short note on how no versions option works 2021-07-15 17:29:30 +02:00
buengese
8652cfe575 jottacloud: add no versions option 2021-07-15 17:29:30 +02:00
Nick Craig-Wood
94b1439299 drive: fix some google docs being treated as files - fixes #5455
At some point some google docs files started having sizes returned in
their listing information.

This then caused rclone to treat the docs as files which caused
downloads to fail.

The API docs now state that google docs may have sizes (whereas I'm
pretty sure they didn't earlier).

This fix removes the check for size, so google docs are identified
solely by not having an MD5 checksum.
2021-07-14 11:40:58 +01:00
Nick Craig-Wood
97c9e55ddb Add Antoine GIRARD to contributors 2021-07-14 11:40:57 +01:00
Ivan Andreev
c0b2832509 docs: serve docker: fix URL of systemd contrib files (#5415) 2021-07-11 13:23:00 +03:00
Ivan Andreev
7436768d62 docs for serve docker and docker plugin (#5415) 2021-07-10 23:56:09 +03:00
Ivan Andreev
55153403aa build docker plugin (#5415) 2021-07-10 23:56:09 +03:00
Antoine GIRARD
daf449b5f2 cmd/serve: add serve docker command (#5415)
Fixes #4750

Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-07-10 23:56:09 +03:00
Antoine GIRARD
221dfc3882 mountlib: refactor before adding serve docker (#5415)
Co-authored-by: Ivan Andreev <ivandeex@gmail.com>
2021-07-10 23:56:09 +03:00
Nick Craig-Wood
aab29353d1 Update email address for Serge Pouliquen 2021-07-08 12:49:13 +01:00
Nick Craig-Wood
c24504b793 Add Chuan Zh to contributors 2021-07-08 12:47:35 +01:00
Nick Craig-Wood
6338d0026e Add Michael Hanselmann to contributors 2021-07-08 12:47:35 +01:00
Chuan Zh
ba836d45ff s3: update Alibaba OSS endpoints 2021-07-08 12:03:04 +01:00
Ole Frost
367cf984af docs: added tip to reduce SharePoint throttling - fixes #5404 2021-07-08 11:39:52 +01:00
Michael Hanselmann
6b7d7d0441 atexit: Terminate with non-zero status after receiving signal
When rclone received a SIGINT (Ctrl+C) or SIGTERM signal while an atexit
function is registered it always terminated with status code 0. Unix
convention is to exit with a non-zero status code. Often it's
`128 + int(signum)`, but at least not zero.

With this change fatal signals handled by the `atexit` package cause
a non-zero exit code. On Unix systems it's `128 + int(signum)` while
on other systems, such as Windows, it's always 2 ("error not otherwise
categorised").

Resolves #5437.

Signed-off-by: Michael Hanselmann <public@hansmi.ch>
2021-07-07 17:59:26 +01:00
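A minimal sketch of that convention (not the actual atexit package): translate the fatal signal into 128 plus the signal number on Unix, and the generic "error not otherwise categorised" status elsewhere.

```
package main

import (
	"os"
	"os/signal"
	"runtime"
	"syscall"
)

// exitCode translates a fatal signal into a conventional exit status:
// 128 + signum on Unix, a generic 2 on other systems such as Windows.
func exitCode(sig os.Signal) int {
	if runtime.GOOS != "windows" {
		if s, ok := sig.(syscall.Signal); ok {
			return 128 + int(s) // e.g. SIGINT -> 130, SIGTERM -> 143
		}
	}
	return 2 // "error not otherwise categorised"
}

func main() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	sig := <-ch
	// ... run registered atexit handlers here ...
	os.Exit(exitCode(sig))
}
```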
Michael Hanselmann
cf19073ac9 cmd: Move exit status codes to separate package
Signal handling by the `atexit` package needs access to
`exitCodeUncategorizedError`. With this change all exit status values
are moved to a dedicated package so that they can be reused.

Signed-off-by: Michael Hanselmann <public@hansmi.ch>
2021-07-07 17:59:26 +01:00
Nick Craig-Wood
ba5c559fec fs/sync: fix tests by only checking --compare-dest timestamps when we have a hash
This fixes the integration test errors introduced in #5410
2021-07-07 16:59:51 +01:00
Nick Craig-Wood
abb8fe8ba1 Add Haochen Tong to contributors 2021-07-07 16:59:51 +01:00
Nick Craig-Wood
765af387e6 Add Dmitry Sitnikov to contributors 2021-07-07 16:59:51 +01:00
Nick Craig-Wood
d05cf6aba8 Add partev to contributors 2021-07-07 16:59:51 +01:00
Nick Craig-Wood
76a3fef24d Add Xuanchen Wu to contributors 2021-07-07 16:59:51 +01:00
Ivan Andreev
b40d9bd4c4 cmd: add hashSUM file support (#5352)
Currently rclone check supports matching two file trees by sizes and hashes.
This change adds support for SUM files produced by GNU utilities like sha1sum.

Fixes #1005 

Note: checksum by default checks, hashsum by default prints sums.
The new flag is named "--checkfile" but carries the hash name.
Summary of introduced command forms:

```
rclone check sums.sha1 remote:path --checkfile sha1
rclone checksum sha1 sums.sha1 remote:path             
rclone hashsum sha1 remote:path --checkfile sums.sha1
rclone sha1sum remote:path --checkfile sums.sha1
rclone md5sum remote:path --checkfile sums.md5
```
2021-07-07 18:34:16 +03:00
Ivan Andreev
4680c0776d backend/local: skip entries removed concurrently with List() (#5297)
This change fixes the bug described below:
if a file is removed while the local backend List() runs,
the call will flag an accounting error.
The bug manifests itself if the local backend is the Sync target,
due to intrinsic concurrency.
The odds of hitting this bug depend on --checkers and --transfers.
Chunker over local backend is affected even more because
updating a composite object with a smaller size content
translates into removing chunks on the underlying file system
and involves a number of List() calls.
2021-07-07 16:50:19 +03:00
buengese
fb305b5976 fichier: check that we actually got a download token and retry if we didn't 2021-07-06 14:58:50 +02:00
Ole Frost
5e91b93e59 cmdtest: end-to-end test for commands, flags and environment variables
There was no easy way to automatically test the end-to-end functionality
of commands, flags, environment variables etc.

The need for end-to-end testing was highlighted by the issues fixed
in #5341. There was no automated test to continually verify current
behaviour, nor a framework to quickly test the correctness of the fixes.

This change adds an end-to-end testing framework in the cmdtest folder.
It has some simple examples in func TestCmdTest in cmdtest_test.go. The
tests should be readable by anybody familiar with rclone and look like
this:

    // Test the rclone version command with debug logging (-vv)
    out, err = rclone("version", "-vv")
    if assert.NoError(t, err) {
        assert.Contains(t, out, "rclone v")
        assert.Contains(t, out, "os/version:")
        assert.Contains(t, out, " DEBUG : ")
    }

The end-to-end tests are executed just like the Go unit tests, that is:

    go test ./cmdtest -v

The change also contains a thorough test of environment variables in
environment_test.go.

Thanks to @ncw for encouragement and introduction to the TestMain trick.
2021-07-05 16:38:20 +01:00
Ole Frost
58c99427b3 config: fixed issues with flags/options set by environment vars.
Some environment variables didn’t behave like their corresponding
command line flags. The affected flags were --stats, --log-level,
--separator, --multi-thread-streams, --rc-addr, --rc-user and --rc-pass.
Example:

    RCLONE_STATS='10s'
    rclone check remote: remote: --progress
# Expected: rclone check remote: remote: --progress --stats=10s
    # Actual: rclone check remote: remote: --progress

Remote specific options set by environment variables were overruled by
less specific backend options set by environment variables. Example:

    RCLONE_DRIVE_USE_TRASH='false'
    RCLONE_CONFIG_MYDRIVE_USE_TRASH='true'
    rclone deletefile myDrive:my-test-file
    # Expected: my-test-file is recoverable in the trash folder
    # Actual: my-test-file is permanently deleted (not recoverable)

Backend specific options set by environment variables were overruled by
general backend options set by environment variables. Example:

    RCLONE_SKIP_LINKS='true'
    RCLONE_LOCAL_SKIP_LINKS='false'
    rclone lsd local:
    # Expected result: Warnings when symlinks are skipped
    # Actual result: No warnings when symlinks are skipped
    # That is RCLONE_SKIP_LINKS takes precedence

The above issues have been fixed.

The debug logging (-vv) has been enhanced to show when flags are set by
environment variables.

The documentation has been enhanced with details on the precedence of
configuration options.

See pull request #5341 for more information.
2021-07-05 16:38:20 +01:00
albertony
fee0abf513 docs: add note about use of user and logname environment variables for current username 2021-07-05 16:31:16 +01:00
Nick Gaya
40024990b7 fs/operations: Don't update timestamps of files in --compare-dest 2021-07-05 16:29:44 +01:00
Haochen Tong
04aa6969a4 accounting: calculate rolling average speed 2021-07-05 16:27:33 +01:00
Haochen Tong
d2050523de accounting: fix startTime of statsGroups.sum 2021-07-05 16:27:33 +01:00
Ivan Andreev
1cc6dd349e Add google search widget to rclone.org 2021-07-05 16:21:36 +01:00
Ole Frost
721bae11c3 docs: ease contribution for beginners in Go, Git and GitHub
Improved/added steps to:
 * Install Git with basic setup
 * Use both SSH and HTTPS for the git origin
 * Install Go and verify the GOPATH
 * Update the forked master
 * Find a popular editor for Go
2021-07-05 16:03:53 +01:00
Dmitry Sitnikov
b439199578 azureblob: Fix typo in Azure Blob help
Change the command to create RBAC file to the correct one
`az ad sp create-for-rbac`
Add the link to the command documentation
https://docs.microsoft.com/en-us/cli/azure/ad/sp?view=azure-cli-latest#az_ad_sp_create_for_rbac
2021-07-05 15:58:41 +01:00
partev
0bfd6f793b docs: replace OSX with macOS 2021-07-05 14:51:00 +01:00
Nick Craig-Wood
76ea716abf ftp: make upload error 250 indicate success
Some servers seem to send return code 250 to indicate successful
upload - previously rclone was treating this as an error.

See: https://forum.rclone.org/t/transfer-on-mega-in-ftp-mode-is-not-working/24642/
2021-07-05 10:35:02 +01:00
Alex Chen
e635f4c0be fs: make --dump imply -vv (#5418) 2021-06-23 00:32:26 +08:00
Xuanchen Wu
0cb973f127 onedrive: Make link return direct download link (#5417)
Co-authored-by: Cnly <minecnly@gmail.com>
2021-06-22 21:25:08 +08:00
Alex Chen
96ace599a8 fs: fix logging level mentioned in docs of Logf 2021-06-21 23:30:26 +08:00
336 changed files with 17237 additions and 2998 deletions

View File

@@ -241,14 +241,14 @@ jobs:
           fetch-depth: 0
       # Upgrade together with NDK version
-      - name: Set up Go 1.14
+      - name: Set up Go 1.16
         uses: actions/setup-go@v1
         with:
-          go-version: 1.14
+          go-version: 1.16
       # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
       - name: Force NDK version
-        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
+        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
       - name: Go module cache
         uses: actions/cache@v2
@@ -279,7 +279,7 @@ jobs:
       - name: arm-v7a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm' >> $GITHUB_ENV
@@ -292,7 +292,7 @@ jobs:
       - name: arm64-v8a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm64' >> $GITHUB_ENV
@@ -305,7 +305,7 @@ jobs:
       - name: x86 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=386' >> $GITHUB_ENV
@@ -318,7 +318,7 @@ jobs:
       - name: x64 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=amd64' >> $GITHUB_ENV

View File

@@ -32,3 +32,40 @@ jobs:
           publish: true
           dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
           dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
+  build_docker_volume_plugin:
+    if: github.repository == 'rclone/rclone'
+    needs: build
+    runs-on: ubuntu-latest
+    name: Build and publish docker volume plugin
+    steps:
+      - name: Checkout master
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Set plugin parameters
+        shell: bash
+        run: |
+          GITHUB_REF=${{ github.ref }}
+          PLUGIN_IMAGE_USER=rclone
+          PLUGIN_IMAGE_NAME=docker-volume-rclone
+          PLUGIN_IMAGE_TAG=${GITHUB_REF#refs/tags/}
+          PLUGIN_IMAGE=${PLUGIN_IMAGE_USER}/${PLUGIN_IMAGE_NAME}:${PLUGIN_IMAGE_TAG}
+          PLUGIN_IMAGE_LATEST=${PLUGIN_IMAGE_USER}/${PLUGIN_IMAGE_NAME}:latest
+          echo "PLUGIN_IMAGE_USER=${PLUGIN_IMAGE_USER}" >> $GITHUB_ENV
+          echo "PLUGIN_IMAGE_NAME=${PLUGIN_IMAGE_NAME}" >> $GITHUB_ENV
+          echo "PLUGIN_IMAGE_TAG=${PLUGIN_IMAGE_TAG}" >> $GITHUB_ENV
+          echo "PLUGIN_IMAGE=${PLUGIN_IMAGE}" >> $GITHUB_ENV
+          echo "PLUGIN_IMAGE_LATEST=${PLUGIN_IMAGE_LATEST}" >> $GITHUB_ENV
+      - name: Build image
+        shell: bash
+        run: |
+          make docker-plugin
+      - name: Push image
+        shell: bash
+        run: |
+          docker login -u ${{ secrets.DOCKER_HUB_USER }} -p ${{ secrets.DOCKER_HUB_PASSWORD }}
+          make docker-plugin-push PLUGIN_IMAGE=${PLUGIN_IMAGE}
+          make docker-plugin-push PLUGIN_IMAGE=${PLUGIN_IMAGE_LATEST}

.gitignore vendored (1 line changed)
View File

@@ -13,3 +13,4 @@ rclone.iml
 fuzz-build.zip
 *.orig
 *.rej
+Thumbs.db

View File

@@ -12,95 +12,162 @@ When filing an issue, please include the following information if
 possible as well as a description of the problem. Make sure you test
 with the [latest beta of rclone](https://beta.rclone.org/):

-  * Rclone version (e.g. output from `rclone -V`)
-  * Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
+  * Rclone version (e.g. output from `rclone version`)
+  * Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
   * The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
   * A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
     * if the log contains secrets then edit the file with a text editor first to obscure them

-## Submitting a pull request ##
+## Submitting a new feature or bug fix ##

 If you find a bug that you'd like to fix, or a new feature that you'd
 like to implement then please submit a pull request via GitHub.

-If it is a big feature then make an issue first so it can be discussed.
+If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.

-You'll need a Go environment set up with GOPATH set. See [the Go
-getting started docs](https://golang.org/doc/install) for more info.
-
-First in your web browser press the fork button on [rclone's GitHub
+To prepare your pull request first press the fork button on [rclone's GitHub
 page](https://github.com/rclone/rclone).

-Now in your terminal
+Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
+
+Next open your terminal, change directory to your preferred folder and initialise your local rclone project:

     git clone https://github.com/rclone/rclone.git
     cd rclone
     git remote rename origin upstream
+    # if you have SSH keys setup in your GitHub account:
     git remote add origin git@github.com:YOURUSER/rclone.git
-    go build
+    # otherwise:
+    git remote add origin https://github.com/YOURUSER/rclone.git

-Make a branch to add your new feature
+Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
+
+Now [install Go](https://golang.org/doc/install) and verify your installation:
+
+    go version
+
+Great, you can now compile and execute your own version of rclone:
+
+    go build
+    ./rclone version
+
+Finally make a branch to add your new feature

     git checkout -b my-new-feature

 And get hacking.

-When ready - run the unit tests for the code you changed
+You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
+
+When ready - test the affected functionality and run the unit tests for the code you changed

+    cd folder/with/changed/files
     go test -v

 Note that you may need to make a test remote, e.g. `TestSwift` for some
 of the unit tests.

-Note the top level Makefile targets
-
-  * make check
-  * make test
-
-Both of these will be run by Travis when you make a pull request but
-you can do this yourself locally too. These require some extra go
-packages which you can install with
-
-  * make build_dep
+This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.

 Make sure you

+  * Add [unit tests](#testing) for a new feature.
   * Add [documentation](#writing-documentation) for a new feature.
-  * Follow the [commit message guidelines](#commit-messages).
-  * Add [unit tests](#testing) for a new feature
-  * squash commits down to one per feature
-  * rebase to master with `git rebase master`
+  * [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).

-When you are done with that
+When you are done with that push your changes to Github:

     git push -u origin my-new-feature

-Go to the GitHub website and click [Create pull
+and open the GitHub website to [create your pull
 request](https://help.github.com/articles/creating-a-pull-request/).

-You patch will get reviewed and you might get asked to fix some stuff.
+Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.

-If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
-
-```
-git log # See how many commits you want to squash
-git reset --soft HEAD~2 # This squashes the 2 latest commits together.
-git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
-git commit # Add a new commit message.
-git push --force # Push the squashed commit to your GitHub repo.
-# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
-```
+You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).

-## CI for your fork ##
+## Using Git and Github ##
+
+### Committing your changes ###
+
+Follow the guideline for [commit messages](#commit-messages) and then:
+
+    git checkout my-new-feature      # To switch to your branch
+    git status                       # To see the new and changed files
+    git add FILENAME                 # To select FILENAME for the commit
+    git status                       # To verify the changes to be committed
+    git commit                       # To do the commit
+    git log                          # To verify the commit. Use q to quit the log
+
+You can modify the message or changes in the latest commit using:
+
+    git commit --amend
+
+If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
+
+### Replacing your previously pushed commits ###
+
+Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
+
+Your previously pushed commits are replaced by:
+
+    git push --force origin my-new-feature
+
+### Basing your changes on the latest master ###
+
+To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
+
+    git checkout master
+    git fetch upstream
+    git merge --ff-only
+    git push origin --follow-tags # optional update of your fork in GitHub
+    git checkout my-new-feature
+    git rebase master
+
+If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
+
+### Squashing your commits ###
+
+To combine your commits into one commit:
+
+    git log # To count the commits to squash, e.g. the last 2
+    git reset --soft HEAD~2 # To undo the 2 latest commits
+    git status # To check everything is as expected
+
+If everything is fine, then make the new combined commit:
+
+    git commit # To commit the undone commits as one
+
+otherwise, you may roll back using:
+
+    git reflog # To check that HEAD{1} is your previous state
+    git reset --soft 'HEAD@{1}' # To roll back to your previous state
+
+If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
+
+Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
+
+### GitHub Continuous Integration ###

 rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.

 ## Testing ##

+### Quick testing ###
+
 rclone's tests are run from the go testing framework, so at the top
 level you can run this to run all the tests.

     go test -v ./...

+You can also use `make`, if supported by your platform
+
+    make quicktest
+
+The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
+
+### Backend testing ###
+
 rclone contains a mixture of unit tests and integration tests.
 Because it is difficult (and in some respects pointless) to test cloud
 storage systems by mocking all their interfaces, rclone unit tests can
@@ -134,12 +201,19 @@ project root:

     go install github.com/rclone/rclone/fstest/test_all
     test_all -backend drive

+### Full integration testing ###
+
 If you want to run all the integration tests against all the remotes,
 then change into the project root and run

-    make check
     make test

-This command is run daily on the integration test server. You can
+The commands may require some extra go packages which you can install with
+
+    make build_dep
+
+The full integration tests are run daily on the integration test server. You can
 find the results at https://pub.rclone.org/integration-tests/

 ## Code Organisation ##
@@ -154,6 +228,7 @@ with modules beneath.
   * cmd - the rclone commands
     * all - import this to load all the commands
     * ...commands
+  * cmdtest - end-to-end tests of commands, flags, environment variables,...
   * docs - the documentation and website
     * content - adjust these docs only - everything else is autogenerated
       * command - these are auto generated - edit the corresponding .go file

MANUAL.html generated (2536 lines changed)

File diff suppressed because it is too large

MANUAL.md generated (3067 lines changed)

File diff suppressed because it is too large

MANUAL.txt generated (3219 lines changed)

File diff suppressed because it is too large

View File

@@ -256,3 +256,36 @@ startstable:
 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
+
+# docker volume plugin
+PLUGIN_IMAGE_USER ?= rclone
+PLUGIN_IMAGE_TAG ?= latest
+PLUGIN_IMAGE_NAME ?= docker-volume-rclone
+PLUGIN_IMAGE ?= $(PLUGIN_IMAGE_USER)/$(PLUGIN_IMAGE_NAME):$(PLUGIN_IMAGE_TAG)
+PLUGIN_BASE_IMAGE := rclone/rclone:latest
+PLUGIN_BUILD_DIR := ./build/docker-plugin
+PLUGIN_CONTRIB_DIR := ./cmd/serve/docker/contrib/plugin
+PLUGIN_CONFIG := $(PLUGIN_CONTRIB_DIR)/config.json
+PLUGIN_DOCKERFILE := $(PLUGIN_CONTRIB_DIR)/Dockerfile
+PLUGIN_CONTAINER := docker-volume-rclone-dev-$(shell date +'%Y%m%d-%H%M%S')
+
+docker-plugin: docker-plugin-rootfs docker-plugin-create
+
+docker-plugin-image: rclone
+	docker build --no-cache --pull --build-arg BASE_IMAGE=${PLUGIN_BASE_IMAGE} -t ${PLUGIN_IMAGE} -f ${PLUGIN_DOCKERFILE} .
+
+docker-plugin-rootfs: docker-plugin-image
+	mkdir -p ${PLUGIN_BUILD_DIR}/rootfs
+	docker create --name ${PLUGIN_CONTAINER} ${PLUGIN_IMAGE}
+	docker export ${PLUGIN_CONTAINER} | tar -x -C ${PLUGIN_BUILD_DIR}/rootfs
+	docker rm -vf ${PLUGIN_CONTAINER}
+	cp ${PLUGIN_CONFIG} ${PLUGIN_BUILD_DIR}/config.json
+
+docker-plugin-create:
+	docker plugin rm -f ${PLUGIN_IMAGE} 2>/dev/null || true
+	docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}
+
+docker-plugin-push: docker-plugin-create
+	docker plugin push ${PLUGIN_IMAGE}
+	docker plugin rm ${PLUGIN_IMAGE}

View File

@@ -1 +1 @@
-v1.56.0
+v1.56.2

View File

@@ -1,5 +1,6 @@
 // Test AmazonCloudDrive filesystem interface
+//go:build acd
 // +build acd

 package amazonclouddrive_test

View File

@@ -1,5 +1,6 @@
 // Package azureblob provides an interface to the Microsoft Azure blob object storage system
+//go:build !plan9 && !solaris && !js && go1.14
 // +build !plan9,!solaris,!js,go1.14

 package azureblob
@@ -80,13 +81,12 @@ func init() {
 Leave blank normally. Needed only if you want to use a service principal instead of interactive login.

-    $ az sp create-for-rbac --name "<name>" \
+    $ az ad sp create-for-rbac --name "<name>" \
       --role "Storage Blob Data Owner" \
       --scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
       > azure-principal.json

-See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli)
-for more details.
+See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.
 `,
 }, {
 	Name: "key",

View File

@@ -1,3 +1,4 @@
+//go:build !plan9 && !solaris && !js && go1.14
 // +build !plan9,!solaris,!js,go1.14

 package azureblob

View File

@@ -1,5 +1,6 @@
 // Test AzureBlob filesystem interface
+//go:build !plan9 && !solaris && !js && go1.14
 // +build !plan9,!solaris,!js,go1.14

 package azureblob

View File

@@ -1,6 +1,7 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
+//go:build plan9 || solaris || js || !go1.14
 // +build plan9 solaris js !go1.14

 package azureblob

View File

@@ -1,3 +1,4 @@
+//go:build !plan9 && !solaris && !js && go1.14
 // +build !plan9,!solaris,!js,go1.14

 package azureblob

View File

@@ -1,3 +1,4 @@
+//go:build !plan9 && !solaris && !js && go1.14
 // +build !plan9,!solaris,!js,go1.14

 package azureblob

View File

@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

View File

@@ -1,5 +1,5 @@
-// +build !plan9,!js
-// +build !race
+//go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

View File

@@ -1,7 +1,7 @@
 // Test Cache filesystem interface
-// +build !plan9,!js
-// +build !race
+//go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

View File

@@ -1,6 +1,7 @@
 // Build for cache for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
+//go:build plan9 || js
 // +build plan9 js

 package cache

View File

@@ -1,5 +1,5 @@
-// +build !plan9,!js
-// +build !race
+//go:build !plan9 && !js && !race
+// +build !plan9,!js,!race

 package cache_test

View File

@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

View File

@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

View File

@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

View File

@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

View File

@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

View File

@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

View File

@@ -210,12 +210,19 @@ func init() {
 		if opt.TeamDriveID == "" {
 			return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", "Configure this as a Shared Drive (Team Drive)?\n")
 		}
-		return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID))
+		return fs.ConfigConfirm("teamdrive_change", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID))
 	case "teamdrive_ok":
 		if config.Result == "false" {
 			m.Set("team_drive", "")
 			return nil, nil
 		}
+		return fs.ConfigGoto("teamdrive_config")
+	case "teamdrive_change":
+		if config.Result == "false" {
+			return nil, nil
+		}
+		return fs.ConfigGoto("teamdrive_config")
+	case "teamdrive_config":
 		f, err := newFs(ctx, name, "", m)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to make Fs to list Shared Drives")
@@ -1321,8 +1328,8 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
 //
 // When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
 func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
-	// If item has MD5 sum or a length it is a file stored on drive
-	if info.Md5Checksum != "" || info.Size > 0 {
+	// If item has MD5 sum it is a file stored on drive
+	if info.Md5Checksum != "" {
 		return f.newRegularObject(remote, info), nil
 	}
@@ -1355,8 +1362,8 @@ func (f *Fs) newObjectWithExportInfo(
 		// Pretend a dangling shortcut is a regular object
 		// It will error if used, but appear in listings so it can be deleted
 		return f.newRegularObject(remote, info), nil
-	case info.Md5Checksum != "" || info.Size > 0:
-		// If item has MD5 sum or a length it is a file stored on drive
+	case info.Md5Checksum != "":
+		// If item has MD5 sum it is a file stored on drive
 		return f.newRegularObject(remote, info), nil
 	case f.opt.SkipGdocs:
 		fs.Debugf(remote, "Skipping google document type %q", info.MimeType)

View File

@@ -87,6 +87,11 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
 	return &file, err
 }

+// maybe do some actual validation later if necessary
+func validToken(token *GetTokenResponse) bool {
+	return token.Status == "OK"
+}
+
 func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
 	request := DownloadRequest{
 		URL: url,
@@ -101,7 +106,8 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
 	var token GetTokenResponse
 	err := f.pacer.Call(func() (bool, error) {
 		resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
-		return shouldRetry(ctx, resp, err)
+		doretry, err := shouldRetry(ctx, resp, err)
+		return doretry || !validToken(&token), err
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't list files")

View File

@@ -1050,10 +1050,21 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return errors.Wrap(err, "Update")
 	}
 	err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
+	// Ignore error 250 here - send by some servers
+	if err != nil {
+		switch errX := err.(type) {
+		case *textproto.Error:
+			switch errX.Code {
+			case ftp.StatusRequestedFileActionOK:
+				err = nil
+			}
+		}
+	}
 	if err != nil {
 		_ = c.Quit() // toss this connection to avoid sync errors
-		remove()
+		// recycle connection in advance to let remove() find free token
 		o.fs.putFtpConnection(nil, err)
+		remove()
 		return errors.Wrap(err, "update stor")
 	}
 	o.fs.putFtpConnection(&c, nil)

View File

@@ -53,6 +53,7 @@ const (
 	minSleep       = 10 * time.Millisecond
 	scopeReadOnly  = "https://www.googleapis.com/auth/photoslibrary.readonly"
 	scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
+	scopeAccess    = 2 // position of access scope in list
 )

 var (
@@ -61,7 +62,7 @@ var (
 		Scopes: []string{
 			"openid",
 			"profile",
-			scopeReadWrite,
+			scopeReadWrite, // this must be at position scopeAccess
 		},
 		Endpoint: google.Endpoint,
 		ClientID: rcloneClientID,
@@ -89,9 +90,9 @@ func init() {
 		case "":
 			// Fill in the scopes
 			if opt.ReadOnly {
-				oauthConfig.Scopes[0] = scopeReadOnly
+				oauthConfig.Scopes[scopeAccess] = scopeReadOnly
 			} else {
-				oauthConfig.Scopes[0] = scopeReadWrite
+				oauthConfig.Scopes[scopeAccess] = scopeReadWrite
 			}
 			return oauthutil.ConfigOut("warning", &oauthutil.Options{
 				OAuth2Config: oauthConfig,

View File

@@ -1,3 +1,4 @@
+//go:build !plan9
 // +build !plan9

 package hdfs

View File

@@ -1,3 +1,4 @@
+//go:build !plan9
 // +build !plan9

 package hdfs
@@ -37,7 +38,7 @@ func init() {
 		Help: `Kerberos service principal name for the namenode

 Enables KERBEROS authentication. Specifies the Service Principal Name
-(<SERVICE>/<FQDN>) for the namenode.`,
+(SERVICE/FQDN) for the namenode.`,
 		Required: false,
 		Examples: []fs.OptionExample{{
 			Value: "hdfs/namenode.hadoop.docker",

View File

@@ -1,5 +1,6 @@
 // Test HDFS filesystem interface
+//go:build !plan9
 // +build !plan9

 package hdfs_test

View File

@@ -1,6 +1,7 @@
 // Build for hdfs for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
+//go:build plan9
 // +build plan9

 package hdfs

View File

@@ -1,3 +1,4 @@
+//go:build !plan9
 // +build !plan9

 package hdfs

View File

@@ -99,6 +99,11 @@ func init() {
 			Help:     "Files bigger than this can be resumed if the upload fail's.",
 			Default:  fs.SizeSuffix(10 * 1024 * 1024),
 			Advanced: true,
+		}, {
+			Name:     "no_versions",
+			Help:     "Avoid server side versioning by deleting files and recreating files instead of overwriting them.",
+			Default:  false,
+			Advanced: true,
 		}, {
 			Name:     config.ConfigEncoding,
 			Help:     config.ConfigEncodingHelp,
@@ -297,6 +302,7 @@ type Options struct {
 	MD5MemoryThreshold fs.SizeSuffix        `config:"md5_memory_limit"`
 	TrashedOnly        bool                 `config:"trashed_only"`
 	HardDelete         bool                 `config:"hard_delete"`
+	NoVersions         bool                 `config:"no_versions"`
 	UploadThreshold    fs.SizeSuffix        `config:"upload_resume_limit"`
 	Enc                encoder.MultiEncoder `config:"encoding"`
 }
@@ -1494,6 +1500,20 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+	if o.fs.opt.NoVersions {
+		err := o.readMetaData(ctx, false)
+		if err == nil {
+			// if the object exists delete it
+			err = o.remove(ctx, true)
+			if err != nil {
+				return errors.Wrap(err, "failed to remove old object")
+			}
+		}
+		// if the object does not exist we can just continue but if the error is something different we should report that
+		if err != fs.ErrorObjectNotFound {
+			return err
+		}
+	}
 	o.fs.tokenRenewer.Start()
 	defer o.fs.tokenRenewer.Stop()
 	size := src.Size()
@@ -1584,8 +1604,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	return nil
 }

-// Remove an object
-func (o *Object) Remove(ctx context.Context) error {
+func (o *Object) remove(ctx context.Context, hard bool) error {
 	opts := rest.Opts{
 		Method:     "POST",
 		Path:       o.filePath(),
@@ -1593,7 +1612,7 @@ func (o *Object) Remove(ctx context.Context) error {
 		NoResponse: true,
 	}

-	if o.fs.opt.HardDelete {
+	if hard {
 		opts.Parameters.Set("rm", "true")
 	} else {
 		opts.Parameters.Set("dl", "true")
@@ -1605,6 +1624,11 @@ func (o *Object) Remove(ctx context.Context) error {
 	})
 }

+// Remove an object
+func (o *Object) Remove(ctx context.Context) error {
+	return o.remove(ctx, o.fs.opt.HardDelete)
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = (*Fs)(nil)

View File

@@ -1,3 +1,4 @@
+//go:build darwin || dragonfly || freebsd || linux
 // +build darwin dragonfly freebsd linux

 package local

View File

@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows

 package local

View File

@@ -1,4 +1,5 @@
-//+build darwin
+//go:build darwin
+// +build darwin

 package local

View File

@@ -1,4 +1,5 @@
-//+build !windows,!darwin
+//go:build !windows && !darwin
+// +build !windows,!darwin

 package local

View File

@@ -1,4 +1,5 @@
-//+build windows
+//go:build windows
+// +build windows

 package local

View File

@@ -1,4 +1,5 @@
-//+build !linux
+//go:build !linux
+// +build !linux

 package local

View File

@@ -1,4 +1,5 @@
-//+build linux
+//go:build linux
+// +build linux

 package local

View File

@@ -1,3 +1,4 @@
+//go:build windows || plan9 || js
 // +build windows plan9 js

 package local

View File

@@ -1,3 +1,4 @@
+//go:build !windows && !plan9 && !js
 // +build !windows,!plan9,!js

 package local

View File

@@ -467,6 +467,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	for _, name := range names {
 		namepath := filepath.Join(fsDirPath, name)
 		fi, fierr := os.Lstat(namepath)
+		if os.IsNotExist(fierr) {
+			// skip entry removed by a concurrent goroutine
+			continue
+		}
 		if fierr != nil {
 			err = errors.Wrapf(err, "failed to read directory %q", namepath)
 			fs.Errorf(dir, "%v", fierr)

View File

@@ -1,5 +1,6 @@
 // Device reading functions
+//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
 // +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris

 package local

View File

@@ -1,5 +1,6 @@
 // Device reading functions
+//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
 // +build darwin dragonfly freebsd linux netbsd openbsd solaris

 package local

View File

@@ -1,4 +1,5 @@
-//+build !windows
+//go:build !windows
+// +build !windows

 package local

View File

@@ -1,4 +1,5 @@
-//+build windows
+//go:build windows
+// +build windows

 package local

View File

@@ -1,3 +1,4 @@
+//go:build !windows && !plan9 && !js
 // +build !windows,!plan9,!js

 package local

View File

@@ -1,3 +1,4 @@
+//go:build windows || plan9 || js
 // +build windows plan9 js

 package local

View File

@@ -1500,10 +1500,85 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		fmt.Println(err)
+		if resp != nil && resp.StatusCode == 400 && f.driveType != driveTypePersonal {
+			return "", errors.Errorf("%v (is making public links permitted by the org admin?)", err)
+		}
 		return "", err
 	}
-	return result.Link.WebURL, nil
+
+	shareURL := result.Link.WebURL
+
+	// Convert share link to direct download link if target is not a folder
+	// Not attempting to do the conversion for regional versions, just to be safe
+	if f.opt.Region != regionGlobal {
+		return shareURL, nil
+	}
+	if info.Folder != nil {
+		fs.Debugf(nil, "Can't convert share link for folder to direct link - returning the link as is")
+		return shareURL, nil
+	}
+
+	cnvFailMsg := "Don't know how to convert share link to direct link - returning the link as is"
+	directURL := ""
+	segments := strings.Split(shareURL, "/")
+	switch f.driveType {
+	case driveTypePersonal:
+		// Method: https://stackoverflow.com/questions/37951114/direct-download-link-to-onedrive-file
+		if len(segments) != 5 {
+			fs.Logf(f, cnvFailMsg)
+			return shareURL, nil
+		}
+		enc := base64.StdEncoding.EncodeToString([]byte(shareURL))
+		enc = strings.ReplaceAll(enc, "/", "_")
+		enc = strings.ReplaceAll(enc, "+", "-")
+		enc = strings.ReplaceAll(enc, "=", "")
+		directURL = fmt.Sprintf("https://api.onedrive.com/v1.0/shares/u!%s/root/content", enc)
+	case driveTypeBusiness:
+		// Method: https://docs.microsoft.com/en-us/sharepoint/dev/spfx/shorter-share-link-format
+		// Example:
+		// https://{tenant}-my.sharepoint.com/:t:/g/personal/{user_email}/{Opaque_String}
+		// --convert to->
+		// https://{tenant}-my.sharepoint.com/personal/{user_email}/_layouts/15/download.aspx?share={Opaque_String}
+		if len(segments) != 8 {
+			fs.Logf(f, cnvFailMsg)
+			return shareURL, nil
+		}
+		directURL = fmt.Sprintf("https://%s/%s/%s/_layouts/15/download.aspx?share=%s",
+			segments[2], segments[5], segments[6], segments[7])
+	case driveTypeSharepoint:
+		// Method: Similar to driveTypeBusiness
+		// Example:
+		// https://{tenant}.sharepoint.com/:t:/s/{site_name}/{Opaque_String}
+		// --convert to->
+		// https://{tenant}.sharepoint.com/sites/{site_name}/_layouts/15/download.aspx?share={Opaque_String}
+		//
+		// https://{tenant}.sharepoint.com/:t:/t/{team_name}/{Opaque_String}
+		// --convert to->
+		// https://{tenant}.sharepoint.com/teams/{team_name}/_layouts/15/download.aspx?share={Opaque_String}
+		//
+		// https://{tenant}.sharepoint.com/:t:/g/{Opaque_String}
+		// --convert to->
+		// https://{tenant}.sharepoint.com/_layouts/15/download.aspx?share={Opaque_String}
+		if len(segments) < 6 || len(segments) > 7 {
+			fs.Logf(f, cnvFailMsg)
+			return shareURL, nil
+		}
+		pathPrefix := ""
+		switch segments[4] {
+		case "s": // Site
+			pathPrefix = "/sites/" + segments[5]
+		case "t": // Team
+			pathPrefix = "/teams/" + segments[5]
+		case "g": // Root site
+		default:
+			fs.Logf(f, cnvFailMsg)
+			return shareURL, nil
+		}
+		directURL = fmt.Sprintf("https://%s%s/_layouts/15/download.aspx?share=%s",
+			segments[2], pathPrefix, segments[len(segments)-1])
+	}
+
+	return directURL, nil
 }

 // CleanUp deletes all the hidden files.

View File

@@ -1092,6 +1092,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	modTime := src.ModTime(ctx)
 	remote := o.Remote()

+	if size < 0 {
+		return errors.New("can't upload unknown sizes objects")
+	}
+
 	// Create the directory for the object if it doesn't exist
 	leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
 	if err != nil {
@@ -1154,10 +1158,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	})
 	if err != nil {
 		// sometimes pcloud leaves a half complete file on
-		// error, so delete it if it exists
-		delObj, delErr := o.fs.NewObject(ctx, o.remote)
-		if delErr == nil && delObj != nil {
-			_ = delObj.Remove(ctx)
+		// error, so delete it if it exists, trying a few times
+		for i := 0; i < 5; i++ {
+			delObj, delErr := o.fs.NewObject(ctx, o.remote)
+			if delErr == nil && delObj != nil {
+				_ = delObj.Remove(ctx)
+				break
+			}
+			time.Sleep(time.Second)
 		}
 		return err
 	}

View File

@@ -1,6 +1,7 @@
 // Package qingstor provides an interface to QingStor object storage
 // Home: https://www.qingcloud.com/
+//go:build !plan9 && !js
 // +build !plan9,!js

 package qingstor

View File

@@ -1,5 +1,6 @@
 // Test QingStor filesystem interface
+//go:build !plan9 && !js
 // +build !plan9,!js

 package qingstor

View File

@@ -1,6 +1,7 @@
 // Build for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
+//go:build plan9 || js
 // +build plan9 js

 package qingstor

View File

@@ -1,5 +1,6 @@
 // Upload object to QingStor
+//go:build !plan9 && !js
 // +build !plan9,!js

 package qingstor

View File

@@ -430,6 +430,12 @@ func init() {
Help: "Endpoint for OSS API.", Help: "Endpoint for OSS API.",
Provider: "Alibaba", Provider: "Alibaba",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "oss-accelerate.aliyuncs.com",
Help: "Global Accelerate",
}, {
Value: "oss-accelerate-overseas.aliyuncs.com",
Help: "Global Accelerate (outside mainland China)",
}, {
Value: "oss-cn-hangzhou.aliyuncs.com", Value: "oss-cn-hangzhou.aliyuncs.com",
Help: "East China 1 (Hangzhou)", Help: "East China 1 (Hangzhou)",
}, { }, {
@@ -446,10 +452,22 @@ func init() {
Help: "North China 3 (Zhangjiakou)", Help: "North China 3 (Zhangjiakou)",
}, { }, {
Value: "oss-cn-huhehaote.aliyuncs.com", Value: "oss-cn-huhehaote.aliyuncs.com",
Help: "North China 5 (Huhehaote)", Help: "North China 5 (Hohhot)",
}, {
Value: "oss-cn-wulanchabu.aliyuncs.com",
Help: "North China 6 (Ulanqab)",
}, { }, {
Value: "oss-cn-shenzhen.aliyuncs.com", Value: "oss-cn-shenzhen.aliyuncs.com",
Help: "South China 1 (Shenzhen)", Help: "South China 1 (Shenzhen)",
}, {
Value: "oss-cn-heyuan.aliyuncs.com",
Help: "South China 2 (Heyuan)",
}, {
Value: "oss-cn-guangzhou.aliyuncs.com",
Help: "South China 3 (Guangzhou)",
}, {
Value: "oss-cn-chengdu.aliyuncs.com",
Help: "West China 1 (Chengdu)",
}, { }, {
Value: "oss-cn-hongkong.aliyuncs.com", Value: "oss-cn-hongkong.aliyuncs.com",
Help: "Hong Kong (Hong Kong)", Help: "Hong Kong (Hong Kong)",
@@ -611,6 +629,10 @@ func init() {
Value: "s3.eu-central-1.wasabisys.com", Value: "s3.eu-central-1.wasabisys.com",
Help: "Wasabi EU Central endpoint", Help: "Wasabi EU Central endpoint",
Provider: "Wasabi", Provider: "Wasabi",
}, {
Value: "s3.ap-northeast-1.wasabisys.com",
Help: "Wasabi AP Northeast endpoint",
Provider: "Wasabi",
}}, }},
}, { }, {
Name: "location_constraint", Name: "location_constraint",

View File

@@ -325,17 +325,20 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
 	switch config.State {
 	case "":
-		// Just make sure we do have a password
+		// Empty state means it's the first call to the Config function
 		if password == "" {
-			return fs.ConfigPassword("", "config_password", "Two-factor authentication: please enter your password (it won't be saved in the configuration)")
+			return fs.ConfigPassword("password", "config_password", "Two-factor authentication: please enter your password (it won't be saved in the configuration)")
 		}
-		return fs.ConfigGoto("password")
+		// password was successfully loaded from the config
+		return fs.ConfigGoto("2fa")
 	case "password":
+		// password should be coming from the previous state (entered by the user)
 		password = config.Result
 		if password == "" {
-			return fs.ConfigError("password", "Password can't be blank")
+			return fs.ConfigError("", "Password can't be blank")
 		}
-		m.Set(configPassword, obscure.MustObscure(config.Result))
+		// save it into the configuration file and keep going
+		m.Set(configPassword, obscure.MustObscure(password))
 		return fs.ConfigGoto("2fa")
 	case "2fa":
 		return fs.ConfigInput("2fa_do", "config_2fa", "Two-factor authentication: please enter your 2FA code")

View File

@@ -1,10 +1,15 @@
 package seafile

 import (
+	"context"
 	"path"
 	"testing"

+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 type pathData struct {
@@ -19,77 +24,77 @@ type pathData struct {
 // from a mix of configuration data and path command line argument
 func TestSplitPath(t *testing.T) {
 	testData := []pathData{
-		pathData{
+		{
 			configLibrary:   "",
 			configRoot:      "",
 			argumentPath:    "",
 			expectedLibrary: "",
 			expectedPath:    "",
 		},
-		pathData{
+		{
 			configLibrary:   "",
 			configRoot:      "",
 			argumentPath:    "Library",
 			expectedLibrary: "Library",
 			expectedPath:    "",
 		},
-		pathData{
+		{
 			configLibrary:   "",
 			configRoot:      "",
 			argumentPath:    path.Join("Library", "path", "to", "file"),
 			expectedLibrary: "Library",
 			expectedPath:    path.Join("path", "to", "file"),
 		},
-		pathData{
+		{
 			configLibrary:   "Library",
 			configRoot:      "",
 			argumentPath:    "",
 			expectedLibrary: "Library",
 			expectedPath:    "",
 		},
-		pathData{
+		{
 			configLibrary:   "Library",
 			configRoot:      "",
 			argumentPath:    "path",
 			expectedLibrary: "Library",
 			expectedPath:    "path",
 		},
-		pathData{
+		{
 			configLibrary:   "Library",
 			configRoot:      "",
 			argumentPath:    path.Join("path", "to", "file"),
 			expectedLibrary: "Library",
 			expectedPath:    path.Join("path", "to", "file"),
 		},
-		pathData{
+		{
 			configLibrary:   "Library",
 			configRoot:      "root",
 			argumentPath:    "",
 			expectedLibrary: "Library",
 			expectedPath:    "root",
 		},
-		pathData{
+		{
 			configLibrary:   "Library",
 			configRoot:      path.Join("root", "path"),
 			argumentPath:    "",
 			expectedLibrary: "Library",
 			expectedPath:    path.Join("root", "path"),
 		},
-		pathData{
+		{
 			configLibrary:   "Library",
 			configRoot:      "root",
 			argumentPath:    "path",
 			expectedLibrary: "Library",
 			expectedPath:    path.Join("root", "path"),
 		},
-		pathData{
+		{
 			configLibrary:   "Library",
 			configRoot:      "root",
 			argumentPath:    path.Join("path", "to", "file"),
 			expectedLibrary: "Library",
 			expectedPath:    path.Join("root", "path", "to", "file"),
 		},
-		pathData{
+		{
 			configLibrary:   "Library",
 			configRoot:      path.Join("root", "path"),
 			argumentPath:    path.Join("subpath", "to", "file"),
@@ -121,3 +126,98 @@ func TestSplitPathIntoSlice(t *testing.T) {
 		assert.Equal(t, expected, output)
 	}
 }
func Test2FAStateMachine(t *testing.T) {
fixtures := []struct {
name string
mapper configmap.Mapper
input fs.ConfigIn
expectState string
expectErrorMessage string
expectResult string
expectFail bool
}{
{
name: "no url",
mapper: configmap.Simple{},
input: fs.ConfigIn{State: ""},
expectFail: true,
},
{
name: "2fa not set",
mapper: configmap.Simple{"url": "http://localhost/"},
input: fs.ConfigIn{State: ""},
expectFail: true,
},
{
name: "unknown state",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "unknown"},
expectFail: true,
},
{
name: "no password in config",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: ""},
expectState: "password",
},
{
name: "config ready for 2fa token",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username", "pass": obscure.MustObscure("password")},
input: fs.ConfigIn{State: ""},
expectState: "2fa",
},
{
name: "password not entered",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "password"},
expectState: "",
expectErrorMessage: "Password can't be blank",
},
{
name: "password entered",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "password", Result: "password"},
expectState: "2fa",
},
{
name: "ask for a 2fa code",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "2fa"},
expectState: "2fa_do",
},
{
name: "no 2fa code entered",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "2fa_do"},
expectState: "2fa", // ask for a code again
expectErrorMessage: "2FA codes can't be blank",
},
{
name: "2fa error and retry",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "2fa_error", Result: "true"},
expectState: "2fa", // ask for a code again
},
{
name: "2fa error and fail",
mapper: configmap.Simple{"url": "http://localhost/", "2fa": "true", "user": "username"},
input: fs.ConfigIn{State: "2fa_error"},
expectFail: true,
},
}
for _, fixture := range fixtures {
t.Run(fixture.name, func(t *testing.T) {
output, err := Config(context.Background(), "test", fixture.mapper, fixture.input)
if fixture.expectFail {
require.Error(t, err)
t.Log(err)
return
}
assert.Equal(t, fixture.expectState, output.State)
assert.Equal(t, fixture.expectErrorMessage, output.Error)
assert.Equal(t, fixture.expectResult, output.Result)
})
}
}
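For orientation, a minimal sketch of how a caller might drive this state machine to completion, reusing the Config signature the test calls above; the loop shape, the promptUser helper, and treating a nil output as completion are assumptions for illustration, not taken from the source:

	in := fs.ConfigIn{} // the empty state is the first call
	for {
		out, err := Config(ctx, "remote", m, in)
		if err != nil {
			return err // e.g. missing url/2fa settings, or an unknown state
		}
		if out == nil { // assumed completion signal
			return nil
		}
		// out.State names the next step ("password", "2fa", "2fa_do", ...);
		// the answer normally comes from prompting the user for that step
		// (password first, then the 2FA code).
		in = fs.ConfigIn{State: out.State, Result: promptUser(out)} // promptUser is hypothetical
	}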

View File

@@ -1,5 +1,6 @@
 // Package sftp provides a filesystem interface using github.com/pkg/sftp
+//go:build !plan9
 // +build !plan9

 package sftp
@@ -429,10 +430,6 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
 		sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
 		sftp.UseConcurrentWrites(!f.opt.DisableConcurrentWrites),
 	)
-	if f.opt.DisableConcurrentReads { // FIXME
-		fs.Errorf(f, "Ignoring disable_concurrent_reads after library reversion - see #5197")
-	}
 	return sftp.NewClientPipe(pr, pw, opts...)
 }

View File

@@ -1,3 +1,4 @@
+//go:build !plan9
 // +build !plan9

 package sftp

View File

@@ -1,5 +1,6 @@
 // Test Sftp filesystem interface
+//go:build !plan9
 // +build !plan9

 package sftp_test

View File

@@ -1,6 +1,7 @@
 // Build for sftp for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
+//go:build plan9
 // +build plan9

 package sftp

View File

@@ -1,3 +1,4 @@
+//go:build !plan9
 // +build !plan9

 package sftp

View File

@@ -1,3 +1,4 @@
+//go:build !plan9
 // +build !plan9

 package sftp

View File

@@ -1,3 +1,4 @@
+//go:build ignore
 // +build ignore

 package main

View File

@@ -1,5 +1,6 @@
 // Code generated by vfsgen; DO NOT EDIT.
+//go:build !dev
 // +build !dev

 package sharefile

View File

@@ -105,7 +105,7 @@ func init() {
 	authRequest := api.AppAuthorization{
 		Username:         username,
-		Password:         password,
+		Password:         obscure.MustReveal(password),
 		Application:      withDefault(opt.AppID, appID),
 		AccessKeyID:      withDefault(opt.AccessKeyID, accessKeyID),
 		PrivateAccessKey: withDefault(opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
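The fix reveals the obscured password before it goes on the wire, matching how the private access key on the next line is already handled. A minimal sketch of the obscure round trip (value illustrative):

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/obscure"
)

func main() {
	enc := obscure.MustObscure("secret") // reversible obscuring, not strong encryption
	fmt.Println(obscure.MustReveal(enc)) // prints "secret"
}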

View File

@@ -1,3 +1,4 @@
+//go:build !plan9
 // +build !plan9

 // Package tardigrade provides an interface to Tardigrade decentralized object storage.

View File

@@ -1,3 +1,4 @@
+//go:build !plan9
 // +build !plan9

 package tardigrade

View File

@@ -1,3 +1,4 @@
+//go:build !plan9
 // +build !plan9

 // Test Tardigrade filesystem interface

View File

@@ -1,3 +1,4 @@
+//go:build plan9
 // +build plan9

 package tardigrade

View File

@@ -1,3 +1,5 @@
 # Email addresses to ignore in the git log when making the authors.md file
 <nick@raig-wood.com>
 <anaghk.dos@gmail.com>
+<33207650+sp31415t1@users.noreply.github.com>
+<unknown>

View File

@@ -1,3 +1,4 @@
+//go:build ignore
 // +build ignore

 // Attempt to work out if branches have already been merged

View File

@@ -1,3 +1,4 @@
+//go:build ignore
 // +build ignore

 // Cross compile rclone - in go because I hate bash ;-)

View File

@@ -1,3 +1,4 @@
+//go:build ignore
 // +build ignore

 // Get the latest release from a github project

View File

@@ -23,6 +23,7 @@ docs = [
"rc.md", "rc.md",
"overview.md", "overview.md",
"flags.md", "flags.md",
"docker.md",
# Keep these alphabetical by full name # Keep these alphabetical by full name
"fichier.md", "fichier.md",

View File

@@ -1,3 +1,4 @@
+//go:build ignore
 // +build ignore

 // Test that the tests in the suite passed in are independent

View File

@@ -10,6 +10,7 @@ import (
_ "github.com/rclone/rclone/cmd/cachestats" _ "github.com/rclone/rclone/cmd/cachestats"
_ "github.com/rclone/rclone/cmd/cat" _ "github.com/rclone/rclone/cmd/cat"
_ "github.com/rclone/rclone/cmd/check" _ "github.com/rclone/rclone/cmd/check"
_ "github.com/rclone/rclone/cmd/checksum"
_ "github.com/rclone/rclone/cmd/cleanup" _ "github.com/rclone/rclone/cmd/cleanup"
_ "github.com/rclone/rclone/cmd/cmount" _ "github.com/rclone/rclone/cmd/cmount"
_ "github.com/rclone/rclone/cmd/config" _ "github.com/rclone/rclone/cmd/config"

View File

@@ -1,3 +1,4 @@
+//go:build !plan9 && !js
 // +build !plan9,!js

 package cachestats

View File

@@ -1,6 +1,7 @@
 // Build for cache for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
+//go:build plan9 || js
 // +build plan9 js

 package cachestats

View File

@@ -2,6 +2,7 @@ package check
 import (
 	"context"
+	"fmt"
 	"io"
 	"os"
 	"strings"
@@ -17,20 +18,22 @@ import (
 // Globals
 var (
 	download          = false
 	oneway            = false
 	combined          = ""
 	missingOnSrc      = ""
 	missingOnDst      = ""
 	match             = ""
 	differ            = ""
 	errFile           = ""
+	checkFileHashType = ""
 )

 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
 	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
+	flags.StringVarP(cmdFlags, &checkFileHashType, "checkfile", "C", checkFileHashType, "Treat source:path as a SUM file with hashes of given type")
 	AddFlags(cmdFlags)
 }
@@ -126,7 +129,6 @@ func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err
 	}
 	return opt, close, nil
 }
-
 var commandDefinition = &cobra.Command{
@@ -144,16 +146,39 @@ If you supply the |--download| flag, it will download the data from
 both remotes and check them against each other on the fly. This can
 be useful for remotes that don't support hashes or if you really want
 to check all the data.
+
+If you supply the |--checkfile HASH| flag with a valid hash name,
+the |source:path| must point to a text file in the SUM format.
 `, "|", "`") + FlagsHelp,
-	Run: func(command *cobra.Command, args []string) {
+	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(2, 2, command, args)
-		fsrc, fdst := cmd.NewFsSrcDst(args)
+		var (
+			fsrc, fdst fs.Fs
+			hashType   hash.Type
+			fsum       fs.Fs
+			sumFile    string
+		)
+		if checkFileHashType != "" {
+			if err := hashType.Set(checkFileHashType); err != nil {
+				fmt.Println(hash.HelpString(0))
+				return err
+			}
+			fsum, sumFile, fsrc = cmd.NewFsSrcFileDst(args)
+		} else {
+			fsrc, fdst = cmd.NewFsSrcDst(args)
+		}
 		cmd.Run(false, true, command, func() error {
 			opt, close, err := GetCheckOpt(fsrc, fdst)
 			if err != nil {
 				return err
 			}
 			defer close()
+			if checkFileHashType != "" {
+				return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, hashType, opt, download)
+			}
 			if download {
 				return operations.CheckDownload(context.Background(), opt)
 			}
@@ -165,5 +190,6 @@ to check all the data.
 			}
 			return operations.Check(context.Background(), opt)
 		})
+		return nil
 	},
 }
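Read together with the help text above, a hedged example of the new mode: the hash name goes to -C/--checkfile and source:path points at the SUM file, e.g.

    $ rclone check --checkfile sha1 SHA1SUMS remote:path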

cmd/checksum/checksum.go (new file, 57 lines)
View File

@@ -0,0 +1,57 @@
package checksum
import (
"context"
"fmt"
"strings"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/check" // for common flags
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
var download = false
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by hashing the contents.")
check.AddFlags(cmdFlags)
}
var commandDefinition = &cobra.Command{
Use: "checksum <hash> sumfile src:path",
Short: `Checks the files in the source against a SUM file.`,
Long: strings.ReplaceAll(`
Checks that hashsums of source files match the SUM file.
It compares hashes (MD5, SHA1, etc) and logs a report of files which
don't match. It doesn't alter the file system.
If you supply the |--download| flag, it will download the data from remote
and calculate the contents hash on the fly. This can be useful for remotes
that don't support hashes or if you really want to check all the data.
`, "|", "`") + check.FlagsHelp,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(3, 3, command, args)
var hashType hash.Type
if err := hashType.Set(args[0]); err != nil {
fmt.Println(hash.HelpString(0))
return err
}
fsum, sumFile, fsrc := cmd.NewFsSrcFileDst(args[1:])
cmd.Run(false, true, command, func() error {
opt, close, err := check.GetCheckOpt(nil, fsrc)
if err != nil {
return err
}
defer close()
return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, hashType, opt, download)
})
return nil
},
}
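Going by the Use string above (checksum <hash> sumfile src:path), a typical invocation would look something like

    $ rclone checksum sha1 SHA1SUMS remote:path

with --download added when the remote cannot serve precomputed hashes and the contents should be fetched and hashed locally.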

View File

@@ -37,6 +37,7 @@ import (
"github.com/rclone/rclone/fs/rc/rcserver" "github.com/rclone/rclone/fs/rc/rcserver"
"github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/buildinfo" "github.com/rclone/rclone/lib/buildinfo"
"github.com/rclone/rclone/lib/exitcode"
"github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/terminal" "github.com/rclone/rclone/lib/terminal"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@@ -60,19 +61,6 @@ var (
 	errorTooManyArguments = errors.New("too many arguments")
 )

-const (
-	exitCodeSuccess = iota
-	exitCodeUsageError
-	exitCodeUncategorizedError
-	exitCodeDirNotFound
-	exitCodeFileNotFound
-	exitCodeRetryError
-	exitCodeNoRetryError
-	exitCodeFatalError
-	exitCodeTransferExceeded
-	exitCodeNoFilesTransferred
-)
-
 // ShowVersion prints the version to stdout
 func ShowVersion() {
 	osVersion, osKernel := buildinfo.GetOSVersion()
@@ -484,31 +472,31 @@ func resolveExitCode(err error) {
 	if err == nil {
 		if ci.ErrorOnNoTransfer {
 			if accounting.GlobalStats().GetTransfers() == 0 {
-				os.Exit(exitCodeNoFilesTransferred)
+				os.Exit(exitcode.NoFilesTransferred)
 			}
 		}
-		os.Exit(exitCodeSuccess)
+		os.Exit(exitcode.Success)
 	}
 	_, unwrapped := fserrors.Cause(err)
 	switch {
 	case unwrapped == fs.ErrorDirNotFound:
-		os.Exit(exitCodeDirNotFound)
+		os.Exit(exitcode.DirNotFound)
 	case unwrapped == fs.ErrorObjectNotFound:
-		os.Exit(exitCodeFileNotFound)
+		os.Exit(exitcode.FileNotFound)
 	case unwrapped == errorUncategorized:
-		os.Exit(exitCodeUncategorizedError)
+		os.Exit(exitcode.UncategorizedError)
 	case unwrapped == accounting.ErrorMaxTransferLimitReached:
-		os.Exit(exitCodeTransferExceeded)
+		os.Exit(exitcode.TransferExceeded)
 	case fserrors.ShouldRetry(err):
-		os.Exit(exitCodeRetryError)
+		os.Exit(exitcode.RetryError)
 	case fserrors.IsNoRetryError(err):
-		os.Exit(exitCodeNoRetryError)
+		os.Exit(exitcode.NoRetryError)
 	case fserrors.IsFatalError(err):
-		os.Exit(exitCodeFatalError)
+		os.Exit(exitcode.FatalError)
 	default:
-		os.Exit(exitCodeUsageError)
+		os.Exit(exitcode.UsageError)
 	}
 }
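Given these call sites and the iota block removed earlier in this file, the new lib/exitcode package plausibly reduces to the following sketch; the constant names and order are taken from the diff, the doc comment is an assumption:

package exitcode

// Exit codes rclone hands back to the shell; the order preserves the
// values of the constants formerly private to cmd.
const (
	Success = iota
	UsageError
	UncategorizedError
	DirNotFound
	FileNotFound
	RetryError
	NoRetryError
	FatalError
	TransferExceeded
	NoFilesTransferred
)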
@@ -539,7 +527,8 @@ func AddBackendFlags() {
 		if opt.IsPassword {
 			help += " (obscured)"
 		}
-		flag := flags.VarPF(pflag.CommandLine, opt, name, opt.ShortOpt, help)
+		flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
+		flags.SetDefaultFromEnv(pflag.CommandLine, name)
 		if _, isBool := opt.Default.(bool); isBool {
 			flag.NoOptDefVal = "true"
 		}

View File

@@ -1,3 +1,4 @@
+//go:build cmount && cgo && (linux || darwin || freebsd || windows)
 // +build cmount
 // +build cgo
 // +build linux darwin freebsd windows

View File

@@ -2,6 +2,7 @@
 //
 // This uses the cgo based cgofuse library
+//go:build cmount && cgo && (linux || darwin || freebsd || windows)
 // +build cmount
 // +build cgo
 // +build linux darwin freebsd windows

View File

@@ -1,8 +1,8 @@
 // Build for macos with the brew tag to handle the absence
 // of fuse and print an appropriate error message
-// +build brew
-// +build darwin
+//go:build brew && darwin
+// +build brew,darwin

 package cmount

View File

@@ -1,3 +1,4 @@
+//go:build cmount && cgo && (linux || darwin || freebsd || windows) && (!race || !windows)
 // +build cmount
 // +build cgo
 // +build linux darwin freebsd windows

View File

@@ -1,6 +1,7 @@
 // Build for cmount for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
+//go:build (!linux && !darwin && !freebsd && !windows) || !brew || !cgo || !cmount
 // +build !linux,!darwin,!freebsd,!windows !brew !cgo !cmount

 package cmount

View File

@@ -1,6 +1,5 @@
-// +build cmount
-// +build cgo
-// +build !windows
+//go:build cmount && cgo && !windows
+// +build cmount,cgo,!windows

 package cmount

View File

@@ -1,6 +1,5 @@
-// +build cmount
-// +build cgo
-// +build windows
+//go:build cmount && cgo && windows
+// +build cmount,cgo,windows

 package cmount

View File

@@ -4,7 +4,6 @@ import (
"context" "context"
"fmt" "fmt"
"os" "os"
"strings"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd"
@@ -21,6 +20,7 @@ var (
 	OutputBase64   = false
 	DownloadFlag   = false
 	HashsumOutfile = ""
+	ChecksumFile   = ""
 )
func init() { func init() {
@@ -33,6 +33,7 @@ func init() {
 func AddHashFlags(cmdFlags *pflag.FlagSet) {
 	flags.BoolVarP(cmdFlags, &OutputBase64, "base64", "", OutputBase64, "Output base64 encoded hashsum")
 	flags.StringVarP(cmdFlags, &HashsumOutfile, "output-file", "", HashsumOutfile, "Output hashsums to a file rather than the terminal")
+	flags.StringVarP(cmdFlags, &ChecksumFile, "checkfile", "C", ChecksumFile, "Validate hashes against a given SUM file instead of printing them")
 	flags.BoolVarP(cmdFlags, &DownloadFlag, "download", "", DownloadFlag, "Download the file and hash it locally; if this flag is not specified, the hash is requested from the remote")
 }
@@ -70,7 +71,7 @@ hashed locally enabling any hash for any remote.
 Run without a hash to see the list of all supported hashes, e.g.
     $ rclone hashsum
-` + hashListHelp(" ") + `
+` + hash.HelpString(4) + `
 Then
     $ rclone hashsum MD5 remote:path
@@ -80,7 +81,7 @@ Note that hash names are case insensitive.
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(0, 2, command, args)
 		if len(args) == 0 {
-			fmt.Print(hashListHelp(""))
+			fmt.Print(hash.HelpString(0))
 			return nil
 		} else if len(args) == 1 {
 			return errors.New("need hash type and remote")
@@ -88,12 +89,16 @@ Note that hash names are case insensitive.
 		var ht hash.Type
 		err := ht.Set(args[0])
 		if err != nil {
-			fmt.Println(hashListHelp(""))
+			fmt.Println(hash.HelpString(0))
 			return err
 		}
 		fsrc := cmd.NewFsSrc(args[1:])
 		cmd.Run(false, false, command, func() error {
+			if ChecksumFile != "" {
+				fsum, sumFile := cmd.NewFsFile(ChecksumFile)
+				return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, ht, nil, DownloadFlag)
+			}
 			if HashsumOutfile == "" {
 				return operations.HashLister(context.Background(), ht, OutputBase64, DownloadFlag, fsrc, nil)
 			}
@@ -107,14 +112,3 @@ Note that hash names are case insensitive.
 		return nil
 	},
 }
-
-func hashListHelp(indent string) string {
-	var help strings.Builder
-	help.WriteString(indent)
-	help.WriteString("Supported hashes are:\n")
-	for _, ht := range hash.Supported().Array() {
-		help.WriteString(indent)
-		fmt.Fprintf(&help, " * %v\n", ht.String())
-	}
-	return help.String()
-}
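With the new -C/--checkfile flag wired in above, hashsum validates against a SUM file instead of printing hashes; an illustrative invocation (hedged, following the command's existing $ rclone hashsum examples):

    $ rclone hashsum sha1 --checkfile SHA1SUMS remote:path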

View File

@@ -32,6 +32,10 @@ hashed locally enabling MD5 for any remote.
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(false, false, command, func() error {
+			if hashsum.ChecksumFile != "" {
+				fsum, sumFile := cmd.NewFsFile(hashsum.ChecksumFile)
+				return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, hash.MD5, nil, hashsum.DownloadFlag)
+			}
 			if hashsum.HashsumOutfile == "" {
 				return operations.HashLister(context.Background(), hash.MD5, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, nil)
 			}

View File

@@ -1,3 +1,4 @@
+//go:build linux || freebsd
 // +build linux freebsd

 package mount

View File

@@ -1,3 +1,4 @@
+//go:build linux || freebsd
 // +build linux freebsd

 package mount

View File

@@ -1,5 +1,6 @@
 // FUSE main Fs
+//go:build linux || freebsd
 // +build linux freebsd

 package mount

View File

@@ -1,3 +1,4 @@
+//go:build linux || freebsd
 // +build linux freebsd

 package mount

Some files were not shown because too many files have changed in this diff.