Mirror of https://github.com/rclone/rclone.git (synced 2025-12-24 04:04:37 +00:00)

Compare commits: fix-smb-wr...fix-7384-d (264 commits)
The 264 commits in this comparison, by abbreviated SHA1:

45c6cf5891, 6e4dd2ab96, 0c17a17e19, 03295bbc3c, b3a1f66759, a947f75d3b, ae0a4c8bbf, 7835991147,
810644e873, 8d3bcc025a, 0f549520ef, ba16fcfaf5, 68f0998699, d031cc138d, e71b252b65, e9cd3e5986,
4025f42bd9, b4216648e4, d8e07bfd8e, 199d82969b, bb74a13c07, 57624629d6, 7c6f0cc455, 422b037087,
7f854acb05, bbf9b1b3d2, 9cf783677e, 4d5d6ee61b, 44637dcd7f, 98f539de8f, 58fd6d7b94, 9c96c13a35,
f7f4651828, 11afc3dde0, 88e516adee, fd95511091, 0cac5d67ab, 6d6dc00abb, 079763f09a, 978cbf9360,
3a50f35df9, c0968a0987, 932f9ec34a, 0e5f12126f, 5c7ba0bfd3, 9933d6c071, 66929416d4, b06935a12e,
806f6ab1eb, c482624a6c, 17fea90ac9, 78176d39fd, ae3c73f610, d20f647487, 6521394865, 42cac4cf53,
223d8c5fe3, dd0e5b9a7f, da244a3709, 938b43c26c, 13fb2fb2ec, 43cc2435c3, 1b1e43074f, cacfc100de,
f8c5695aed, a5972fe0d1, 184459ba8f, 519fe98e6e, 3df6518006, 1045f54128, 0563cc6314, e20f2eee59,
41b8935a6c, fbdf71ab64, d392f9fcd8, dedad9f071, 1f6271fa15, c16c22d6e1, 486a10bec5, 5fa13e3e31,
0e746f25a3, 578b9df6ea, 208e49ce4b, 7aa066cff8, 64df4cf2db, 451d7badf7, d977fa25fa, bb679a9def,
a3d19942bd, 394195cfdf, 3ca766b2f1, 3bf8c877c3, fba2d4c4a7, 8503282a5a, 743ea6ac26, c69eb84573,
f98e672f37, 242fe96b18, 3f159bac16, 6c58e9976c, 110d07548f, f45cee831f, ef0f3020e4, 113b2b648c,
57ab4d279e, 8e21c77ead, 4751980659, 9fe343b725, 2f5685b405, c3117d9efb, 1ebbc74f1d, aee787d33e,
298c13e719, f0c774156e, 08c460dd1a, e3d0bff9ca, caf5dd9d5e, 97d7945cef, 9061e81850, 58339845f4,
4d4f3de5a5, 9bfbf2a4ae, 96f8b7c827, 85f142a206, 82b963e372, 74d5477fad, b5857f0bf8, edb5ccdd0b,
0244caf13a, aaa897337d, e7c002adef, 9e62a74a23, a10abf9934, 36eb3cd660, fd2322cb41, 4eed3ae99a,
d8855b21eb, 8f47b6746d, cc2a4c2e20, fabeb8e44e, c27977d4d5, d5d28a7513, 94ccc95515, 5d5473c8a5,
251a8e3c39, a259226eb2, 5fba502516, ba11040d6b, 668711e432, a71d181cb0, cab42107f7, 1f9a79ef09,
c0fb9ebfce, e8fcde8de1, 72dfdd97d8, bb88b8499b, 4ac5cb07ca, 4a3e9bbabf, 33376bf399, 94b7c49196,
a7faf05393, 98a96596df, 88bd80c1fa, c6755aa768, 01be5c75be, 20bd17f107, 64ec5709fe, 1ea8678be2,
8341de05c6, 47ca0c326e, 54196f34e3, 9fdf3d548a, 10774d297a, bf9053705d, 0bd059ec55, 59d363b3c1,
94a5de58c8, a466ababd0, 168d577297, ddaf01ece9, b5301e03a6, e9763552f7, 6b60e09ff2, 41a52f50df,
93f35c915a, a2c4f07a57, d3dcc61154, 34ef5147aa, aa29742be2, ef366b47f1, 23abac2a59, d3ba32c43e,
cdf5a97bb6, e1b0417c28, acf1e2df84, 831d1df67f, e67157cf46, ac012618db, 7f09d9c2a0, 0548e61910,
ad83ff769b, ca14b00b34, 52d444f4a9, 4506f35f2e, 4ab57eb90b, 23ab6fa3a0, af8ba18580, 0b90dd23c1,
e64be7652a, 179f978f75, 17b7ee1f3a, 5c73363b16, bf21db0ac4, 0180301b3f, adfb1f7c7d, 6092fe2aaa,
53868ef4e1, e1ad467009, 12db7b6935, 7434ad8618, e4ab59bcc7, 9119c6c76f, 9d4d294793, 750ed556a5,
5b0d3d060f, 5b0f9dc4e3, b0a87d7cf1, 37d786c82a, 56fe12c479, 9197180610, f4a538371d, f2ec08cba2,
8f25531b7f, 0ee6d0b4bf, 4ac4597afb, 143df6f6d2, 8264ba987b, 7a27d9a192, 195ad98311, 29baa5888f,
c7a2719fac, c190b9b14f, 5fa68e9ca5, b9727cc6ab, d8d76ff647, 5afa838457, 2de084944b, 48a8bfa6b3,
d3ce795c30, c04657cd4c, 6255d9dfaa, f56ea2bee2, d6ba60c04d, 37eaa3682a, c5f6fc3283, 4daf755da0,
eee8ad5146, bcb3289dad, ef2ef8ef84, c69cf46f06, 25f59b2918, 7801b160f2, 23f8dea182, 3337fe31c7
.github/workflows/build.yml (vendored): 41 changed lines
@@ -27,12 +27,12 @@ jobs:
 strategy:
 fail-fast: false
 matrix:
-job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
+job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.20', 'go1.21']

 include:
 - job_name: linux
 os: ubuntu-latest
-go: '1.21'
+go: '>=1.22.0-rc.1'
 gotags: cmount
 build_flags: '-include "^linux/"'
 check: true
@@ -43,14 +43,14 @@ jobs:

 - job_name: linux_386
 os: ubuntu-latest
-go: '1.21'
+go: '>=1.22.0-rc.1'
 goarch: 386
 gotags: cmount
 quicktest: true

 - job_name: mac_amd64
 os: macos-11
-go: '1.21'
+go: '>=1.22.0-rc.1'
 gotags: 'cmount'
 build_flags: '-include "^darwin/amd64" -cgo'
 quicktest: true
@@ -59,14 +59,14 @@ jobs:

 - job_name: mac_arm64
 os: macos-11
-go: '1.21'
+go: '>=1.22.0-rc.1'
 gotags: 'cmount'
 build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
 deploy: true

 - job_name: windows
 os: windows-latest
-go: '1.21'
+go: '>=1.22.0-rc.1'
 gotags: cmount
 cgo: '0'
 build_flags: '-include "^windows/"'
@@ -76,23 +76,23 @@ jobs:

 - job_name: other_os
 os: ubuntu-latest
-go: '1.21'
+go: '>=1.22.0-rc.1'
 build_flags: '-exclude "^(windows/|darwin/|linux/)"'
 compile_all: true
 deploy: true

-- job_name: go1.19
-os: ubuntu-latest
-go: '1.19'
-quicktest: true
-racequicktest: true
-
 - job_name: go1.20
 os: ubuntu-latest
 go: '1.20'
 quicktest: true
 racequicktest: true

+- job_name: go1.21
+os: ubuntu-latest
+go: '1.21'
+quicktest: true
+racequicktest: true
+
 name: ${{ matrix.job_name }}

 runs-on: ${{ matrix.os }}
@@ -104,7 +104,7 @@ jobs:
 fetch-depth: 0

 - name: Install Go
-uses: actions/setup-go@v4
+uses: actions/setup-go@v5
 with:
 go-version: ${{ matrix.go }}
 check-latest: true
@@ -168,7 +168,7 @@ jobs:
 env

 - name: Go module cache
-uses: actions/cache@v3
+uses: actions/cache@v4
 with:
 path: ~/go/pkg/mod
 key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -216,7 +216,6 @@ jobs:
 shell: bash
 run: |
 if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
-if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
 make ci_beta
 env:
 RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
@@ -242,9 +241,9 @@ jobs:

 # Run govulncheck on the latest go version, the one we build binaries with
 - name: Install Go
-uses: actions/setup-go@v4
+uses: actions/setup-go@v5
 with:
-go-version: '1.21'
+go-version: '>=1.22.0-rc.1'
 check-latest: true

 - name: Install govulncheck
@@ -267,12 +266,12 @@ jobs:

 # Upgrade together with NDK version
 - name: Set up Go
-uses: actions/setup-go@v4
+uses: actions/setup-go@v5
 with:
-go-version: '1.21'
+go-version: '>=1.22.0-rc.1'

 - name: Go module cache
-uses: actions/cache@v3
+uses: actions/cache@v4
 with:
 path: ~/go/pkg/mod
 key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}

@@ -10,6 +10,15 @@ jobs:
 runs-on: ubuntu-latest
 name: Build image job
 steps:
+- name: Free some space
+shell: bash
+run: |
+df -h .
+# Remove android SDK
+sudo rm -rf /usr/local/lib/android || true
+# Remove .net runtime
+sudo rm -rf /usr/share/dotnet || true
+df -h .
 - name: Checkout master
 uses: actions/checkout@v4
 with:
@@ -42,7 +51,10 @@ jobs:
 # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
 # for more detailed information.
 password: ${{ secrets.GITHUB_TOKEN }}

+- name: Show disk usage
+shell: bash
+run: |
+df -h .
 - name: Build and publish image
 uses: docker/build-push-action@v5
 with:
@@ -54,8 +66,12 @@ jobs:
 rclone/rclone:beta
 labels: ${{ steps.meta.outputs.labels }}
 platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-cache-from: type=gha
-cache-to: type=gha,mode=max
+cache-from: type=gha, scope=${{ github.workflow }}
+cache-to: type=gha, mode=max, scope=${{ github.workflow }}
 provenance: false
 # Eventually cache will need to be cleared if builds more frequent than once a week
 # https://github.com/docker/build-push-action/issues/252
+- name: Show disk usage
+shell: bash
+run: |
+df -h .

@@ -10,6 +10,15 @@ jobs:
 runs-on: ubuntu-latest
 name: Build image job
 steps:
+- name: Free some space
+shell: bash
+run: |
+df -h .
+# Remove android SDK
+sudo rm -rf /usr/local/lib/android || true
+# Remove .net runtime
+sudo rm -rf /usr/share/dotnet || true
+df -h .
 - name: Checkout master
 uses: actions/checkout@v4
 with:
@@ -39,6 +48,15 @@ jobs:
 runs-on: ubuntu-latest
 name: Build docker plugin job
 steps:
+- name: Free some space
+shell: bash
+run: |
+df -h .
+# Remove android SDK
+sudo rm -rf /usr/local/lib/android || true
+# Remove .net runtime
+sudo rm -rf /usr/share/dotnet || true
+df -h .
 - name: Checkout master
 uses: actions/checkout@v4
 with:
.gitignore (vendored): 5 changed lines
@@ -14,4 +14,7 @@ fuzz-build.zip
 *.rej
 Thumbs.db
 __pycache__
 .DS_Store
+/docs/static/img/logos/
+resource_windows_*.syso
+.devcontainer
CONTRIBUTING.md: 275 changed lines
@@ -1,8 +1,8 @@
-# Contributing to rclone #
+# Contributing to rclone

 This is a short guide on how to contribute things to rclone.

-## Reporting a bug ##
+## Reporting a bug

 If you've just got a question or aren't sure if you've found a bug
 then please use the [rclone forum](https://forum.rclone.org/) instead
@@ -12,13 +12,13 @@ When filing an issue, please include the following information if
 possible as well as a description of the problem. Make sure you test
 with the [latest beta of rclone](https://beta.rclone.org/):

-* Rclone version (e.g. output from `rclone version`)
-* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
-* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
-* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
-* if the log contains secrets then edit the file with a text editor first to obscure them
+- Rclone version (e.g. output from `rclone version`)
+- Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
+- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
+- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
+- if the log contains secrets then edit the file with a text editor first to obscure them

-## Submitting a new feature or bug fix ##
+## Submitting a new feature or bug fix

 If you find a bug that you'd like to fix, or a new feature that you'd
 like to implement then please submit a pull request via GitHub.
@@ -73,9 +73,9 @@ This is typically enough if you made a simple bug fix, otherwise please read the

 Make sure you

-* Add [unit tests](#testing) for a new feature.
-* Add [documentation](#writing-documentation) for a new feature.
-* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
+- Add [unit tests](#testing) for a new feature.
+- Add [documentation](#writing-documentation) for a new feature.
+- [Commit your changes](#committing-your-changes) using the [commit message guidelines](#commit-messages).

 When you are done with that push your changes to GitHub:

@@ -88,9 +88,9 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I

 You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).

-## Using Git and GitHub ##
+## Using Git and GitHub

-### Committing your changes ###
+### Committing your changes

 Follow the guideline for [commit messages](#commit-messages) and then:

@@ -107,7 +107,7 @@ You can modify the message or changes in the latest commit using:

 If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).

-### Replacing your previously pushed commits ###
+### Replacing your previously pushed commits

 Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.

@@ -115,7 +115,7 @@ Your previously pushed commits are replaced by:

 git push --force origin my-new-feature

-### Basing your changes on the latest master ###
+### Basing your changes on the latest master

 To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):

@@ -149,13 +149,21 @@ If you squash commits that have been pushed to GitHub, then you will have to [re

 Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.

-### GitHub Continuous Integration ###
+### GitHub Continuous Integration

 rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.

-## Testing ##
+## Testing

-### Quick testing ###
+### Code quality tests
+
+If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.
+
+You can run them with `make check` or with `golangci-lint run ./...`.
+
+Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
+
+### Quick testing

 rclone's tests are run from the go testing framework, so at the top
 level you can run this to run all the tests.
@@ -168,7 +176,7 @@ You can also use `make`, if supported by your platform

 The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.

-### Backend testing ###
+### Backend testing

 rclone contains a mixture of unit tests and integration tests.
 Because it is difficult (and in some respects pointless) to test cloud
@@ -203,7 +211,7 @@ project root:
 go install github.com/rclone/rclone/fstest/test_all
 test_all -backend drive

-### Full integration testing ###
+### Full integration testing

 If you want to run all the integration tests against all the remotes,
 then change into the project root and run
@@ -218,55 +226,56 @@ The commands may require some extra go packages which you can install with
 The full integration tests are run daily on the integration test server. You can
 find the results at https://pub.rclone.org/integration-tests/

-## Code Organisation ##
+## Code Organisation

 Rclone code is organised into a small number of top level directories
 with modules beneath.

-* backend - the rclone backends for interfacing to cloud providers -
-* all - import this to load all the cloud providers
-* ...providers
-* bin - scripts for use while building or maintaining rclone
-* cmd - the rclone commands
-* all - import this to load all the commands
-* ...commands
-* cmdtest - end-to-end tests of commands, flags, environment variables,...
-* docs - the documentation and website
-* content - adjust these docs only - everything else is autogenerated
-* command - these are auto-generated - edit the corresponding .go file
-* fs - main rclone definitions - minimal amount of code
-* accounting - bandwidth limiting and statistics
-* asyncreader - an io.Reader which reads ahead
-* config - manage the config file and flags
-* driveletter - detect if a name is a drive letter
-* filter - implements include/exclude filtering
-* fserrors - rclone specific error handling
-* fshttp - http handling for rclone
-* fspath - path handling for rclone
-* hash - defines rclone's hash types and functions
-* list - list a remote
-* log - logging facilities
-* march - iterates directories in lock step
-* object - in memory Fs objects
-* operations - primitives for sync, e.g. Copy, Move
-* sync - sync directories
-* walk - walk a directory
-* fstest - provides integration test framework
-* fstests - integration tests for the backends
-* mockdir - mocks an fs.Directory
-* mockobject - mocks an fs.Object
-* test_all - Runs integration tests for everything
-* graphics - the images used in the website, etc.
-* lib - libraries used by the backend
-* atexit - register functions to run when rclone exits
-* dircache - directory ID to name caching
-* oauthutil - helpers for using oauth
-* pacer - retries with backoff and paces operations
-* readers - a selection of useful io.Readers
-* rest - a thin abstraction over net/http for REST
-* vfs - Virtual FileSystem layer for implementing rclone mount and similar
+- backend - the rclone backends for interfacing to cloud providers -
+- all - import this to load all the cloud providers
+- ...providers
+- bin - scripts for use while building or maintaining rclone
+- cmd - the rclone commands
+- all - import this to load all the commands
+- ...commands
+- cmdtest - end-to-end tests of commands, flags, environment variables,...
+- docs - the documentation and website
+- content - adjust these docs only - everything else is autogenerated
+- command - these are auto-generated - edit the corresponding .go file
+- fs - main rclone definitions - minimal amount of code
+- accounting - bandwidth limiting and statistics
+- asyncreader - an io.Reader which reads ahead
+- config - manage the config file and flags
+- driveletter - detect if a name is a drive letter
+- filter - implements include/exclude filtering
+- fserrors - rclone specific error handling
+- fshttp - http handling for rclone
+- fspath - path handling for rclone
+- hash - defines rclone's hash types and functions
+- list - list a remote
+- log - logging facilities
+- march - iterates directories in lock step
+- object - in memory Fs objects
+- operations - primitives for sync, e.g. Copy, Move
+- sync - sync directories
+- walk - walk a directory
+- fstest - provides integration test framework
+- fstests - integration tests for the backends
+- mockdir - mocks an fs.Directory
+- mockobject - mocks an fs.Object
+- test_all - Runs integration tests for everything
+- graphics - the images used in the website, etc.
+- lib - libraries used by the backend
+- atexit - register functions to run when rclone exits
+- dircache - directory ID to name caching
+- oauthutil - helpers for using oauth
+- pacer - retries with backoff and paces operations
+- readers - a selection of useful io.Readers
+- rest - a thin abstraction over net/http for REST
+- librclone - in memory interface to rclone's API for embedding rclone
+- vfs - Virtual FileSystem layer for implementing rclone mount and similar

-## Writing Documentation ##
+## Writing Documentation

 If you are adding a new feature then please update the documentation.
@@ -277,22 +286,22 @@ alphabetical order.
 If you add a new backend option/flag, then it should be documented in
 the source file in the `Help:` field.

-* Start with the most important information about the option,
+- Start with the most important information about the option,
 as a single sentence on a single line.
-* This text will be used for the command-line flag help.
-* It will be combined with other information, such as any default value,
+- This text will be used for the command-line flag help.
+- It will be combined with other information, such as any default value,
 and the result will look odd if not written as a single sentence.
-* It should end with a period/full stop character, which will be shown
+- It should end with a period/full stop character, which will be shown
 in docs but automatically removed when producing the flag help.
-* Try to keep it below 80 characters, to reduce text wrapping in the terminal.
-* More details can be added in a new paragraph, after an empty line (`"\n\n"`).
-* Like with docs generated from Markdown, a single line break is ignored
+- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
+- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
+- Like with docs generated from Markdown, a single line break is ignored
 and two line breaks creates a new paragraph.
-* This text will be shown to the user in `rclone config`
+- This text will be shown to the user in `rclone config`
 and in the docs (where it will be added by `make backenddocs`,
 normally run some time before next release).
-* To create options of enumeration type use the `Examples:` field.
-* Each example value have their own `Help:` field, but they are treated
+- To create options of enumeration type use the `Examples:` field.
+- Each example value have their own `Help:` field, but they are treated
 a bit different than the main option help text. They will be shown
 as an unordered list, therefore a single line break is enough to
 create a new list item. Also, for enumeration texts like name of
@@ -312,12 +321,12 @@ combined unmodified with other information (such as any default value).
 Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
 for small changes in the docs which makes it very easy.

-## Making a release ##
+## Making a release

 There are separate instructions for making a release in the RELEASE.md
 file.

-## Commit messages ##
+## Commit messages

 Please make the first line of your commit message a summary of the
 change that a user (not a developer) of rclone would like to read, and
@@ -358,7 +367,7 @@ error fixing the hang.
 Fixes #1498
 ```

-## Adding a dependency ##
+## Adding a dependency

 rclone uses the [go
 modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
@@ -370,7 +379,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
 instructions below. These will fetch the dependency and add it to
 `go.mod` and `go.sum`.

-GO111MODULE=on go get github.com/ncw/new_dependency
+go get github.com/ncw/new_dependency

 You can add constraints on that package when doing `go get` (see the
 go docs linked above), but don't unless you really need to.
@@ -378,15 +387,15 @@ go docs linked above), but don't unless you really need to.
 Please check in the changes generated by `go mod` including `go.mod`
 and `go.sum` in the same commit as your other changes.

-## Updating a dependency ##
+## Updating a dependency

 If you need to update a dependency then run

-GO111MODULE=on go get -u golang.org/x/crypto
+go get golang.org/x/crypto

 Check in a single commit as above.

-## Updating all the dependencies ##
+## Updating all the dependencies

 In order to update all the dependencies then run `make update`. This
 just uses the go modules to update all the modules to their latest
@@ -395,7 +404,7 @@ stable release. Check in the changes in a single commit as above.
 This should be done early in the release cycle to pick up new versions
 of packages in time for them to get some testing.

-## Updating a backend ##
+## Updating a backend

 If you update a backend then please run the unit tests and the
 integration tests for that backend.
@@ -410,76 +419,82 @@ integration tests.

 The next section goes into more detail about the tests.

-## Writing a new backend ##
+## Writing a new backend

 Choose a name. The docs here will use `remote` as an example.

 Note that in rclone terminology a file system backend is called a
 remote or an fs.

-Research
+### Research

-* Look at the interfaces defined in `fs/types.go`
-* Study one or more of the existing remotes
+- Look at the interfaces defined in `fs/types.go`
+- Study one or more of the existing remotes

-Getting going
+### Getting going

-* Create `backend/remote/remote.go` (copy this from a similar remote)
-* box is a good one to start from if you have a directory-based remote
-* b2 is a good one to start from if you have a bucket-based remote
-* Add your remote to the imports in `backend/all/all.go`
-* HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good go SDK then use that instead.
-* Try to implement as many optional methods as possible as it makes the remote more usable.
-* Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
-* `rclone purge -v TestRemote:rclone-info`
-* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
-* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
-* open `remote.csv` in a spreadsheet and examine
+- Create `backend/remote/remote.go` (copy this from a similar remote)
+- box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
+- b2 is a good one to start from if you have a bucket-based remote
+- Add your remote to the imports in `backend/all/all.go`
+- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
+- Try to implement as many optional methods as possible as it makes the remote more usable.
+- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
+- `rclone purge -v TestRemote:rclone-info`
+- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
+- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
+- open `remote.csv` in a spreadsheet and examine

-Important:
+### Guidelines for a speedy merge

-* Please use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend. It makes maintenance much easier.
-* If your backend is HTTP based then please use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
+- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
+- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
+- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
+- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
+- **Do** put your API type definitions in a separate file - by preference `api/types.go`
+- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!

-Unit tests
+### Unit tests

-* Create a config entry called `TestRemote` for the unit tests to use
-* Create a `backend/remote/remote_test.go` - copy and adjust your example remote
-* Make sure all tests pass with `go test -v`
+- Create a config entry called `TestRemote` for the unit tests to use
+- Create a `backend/remote/remote_test.go` - copy and adjust your example remote
+- Make sure all tests pass with `go test -v`

-Integration tests
+### Integration tests

-* Add your backend to `fstest/test_all/config.yaml`
-* Once you've done that then you can use the integration test framework from the project root:
-* go install ./...
-* test_all -backends remote
+- Add your backend to `fstest/test_all/config.yaml`
+- Once you've done that then you can use the integration test framework from the project root:
+- go install ./...
+- test_all -backends remote

 Or if you want to run the integration tests manually:

-* Make sure integration tests pass with
-* `cd fs/operations`
-* `go test -v -remote TestRemote:`
-* `cd fs/sync`
-* `go test -v -remote TestRemote:`
-* If your remote defines `ListR` check with this also
-* `go test -v -remote TestRemote: -fast-list`
+- Make sure integration tests pass with
+- `cd fs/operations`
+- `go test -v -remote TestRemote:`
+- `cd fs/sync`
+- `go test -v -remote TestRemote:`
+- If your remote defines `ListR` check with this also
+- `go test -v -remote TestRemote: -fast-list`

 See the [testing](#testing) section for more information on integration tests.

-Add your fs to the docs - you'll need to pick an icon for it from
+### Backend documentation
+
+Add your backend to the docs - you'll need to pick an icon for it from
 [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
 alphabetical order of full name of remote (e.g. `drive` is ordered as
 `Google Drive`) but with the local file system last.

-* `README.md` - main GitHub page
-* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
-* make sure this has the `autogenerated options` comments in (see your reference backend docs)
-* update them in your backend with `bin/make_backend_docs.py remote`
-* `docs/content/overview.md` - overview docs
-* `docs/content/docs.md` - list of remotes in config section
-* `docs/content/_index.md` - front page of rclone.org
-* `docs/layouts/chrome/navbar.html` - add it to the website navigation
-* `bin/make_manual.py` - add the page to the `docs` constant
+- `README.md` - main GitHub page
+- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
+- make sure this has the `autogenerated options` comments in (see your reference backend docs)
+- update them in your backend with `bin/make_backend_docs.py remote`
+- `docs/content/overview.md` - overview docs
+- `docs/content/docs.md` - list of remotes in config section
+- `docs/content/_index.md` - front page of rclone.org
+- `docs/layouts/chrome/navbar.html` - add it to the website navigation
+- `bin/make_manual.py` - add the page to the `docs` constant

 Once you've written the docs, run `make serve` and check they look OK
 in the web browser and the links (internal and external) all work.
@@ -524,13 +539,13 @@ in the names so if these fail and the provider doesn't support

 For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).

-## Writing a plugin ##
+## Writing a plugin

 New features (backends, commands) can also be added "out-of-tree", through Go plugins.
 Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
 This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.

-Usage
+### Usage

 - Naming
 - Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
@@ -545,7 +560,7 @@ Usage
 - Plugins must be compiled against the exact version of rclone to work.
 (The rclone used during building the plugin must be the same as the source of rclone)

-Building
+### Building

 To turn your existing additions into a Go plugin, move them to an external repository
 and change the top-level package name to `main`.
@@ -1,8 +1,9 @@
-FROM golang AS builder
+FROM golang:alpine AS builder

 COPY . /go/src/github.com/rclone/rclone/
 WORKDIR /go/src/github.com/rclone/rclone/

+RUN apk add --no-cache make bash gawk git
 RUN \
 CGO_ENABLED=0 \
 make
MANUAL.html (generated): 5974 changed lines (diff suppressed because it is too large)
MANUAL.txt (generated): 5500 changed lines (diff suppressed because it is too large)
Makefile: 15 changed lines
@@ -30,6 +30,7 @@ ifdef RELEASE_TAG
 TAG := $(RELEASE_TAG)
 endif
 GO_VERSION := $(shell go version)
+GO_OS := $(shell go env GOOS)
 ifdef BETA_SUBDIR
 BETA_SUBDIR := /$(BETA_SUBDIR)
 endif
@@ -46,7 +47,13 @@ endif
 .PHONY: rclone test_all vars version

 rclone:
+ifeq ($(GO_OS),windows)
+go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
+endif
 go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
+ifeq ($(GO_OS),windows)
+rm resource_windows_`go env GOARCH`.syso
+endif
 mkdir -p `go env GOPATH`/bin/
 cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
 mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
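The `rclone` target above stamps the release tag into the binary with the Go linker's `-X` flag, which overwrites a package-level string variable at link time. A minimal hedged illustration of the mechanism, as a standalone program rather than rclone's actual `fs.Version` variable:

```go
package main

import "fmt"

// Version keeps a default for local builds; a release build overrides it
// at link time with: go build -ldflags "-X main.Version=v1.2.3" .
var Version = "dev"

func main() {
	// Prints "dev" for a plain build, or whatever value -X injected.
	fmt.Println("version:", Version)
}
```

Built plainly this prints dev; built with the `-ldflags` shown in the comment it prints the injected value, which is how `$(TAG)` reaches `rclone version` via `github.com/rclone/rclone/fs.Version` in the Makefile line above.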
@@ -96,16 +103,12 @@ check: rclone

 # Get the build dependencies
 build_dep:
-go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
+go run bin/get-github-release.go -use-api -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'

 # Get the release dependencies we only install on linux
 release_dep_linux:
 go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest

-# Get the release dependencies we only install on Windows
-release_dep_windows:
-GOOS="" GOARCH="" go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest
-
 # Update dependencies
 showupdates:
 @echo "*** Direct dependencies that could be updated ***"
@@ -150,7 +153,7 @@ rcdocs: rclone

 install: rclone
 install -d ${DESTDIR}/usr/bin
-install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
+install ${GOPATH}/bin/rclone ${DESTDIR}/usr/bin

 clean:
 go clean ./...
@@ -1,5 +1,7 @@
 [<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
 [<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
+[<img src="https://rclone.org/img/logos/warp-github-light.svg" title="Visit warp.dev to learn more." align="right">](https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103#gh-light-mode-only)
+[<img src="https://rclone.org/img/logos/warp-github-dark.svg" title="Visit warp.dev to learn more." align="right">](https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103#gh-dark-mode-only)

 [Website](https://rclone.org) |
 [Documentation](https://rclone.org/docs/) |
@@ -23,7 +25,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
 * Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
 * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
-* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
 * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
 * ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
 * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
@@ -46,6 +47,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
 * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
+* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
 * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
@@ -53,12 +55,14 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Koofr [:page_facing_up:](https://rclone.org/koofr/)
 * Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
 * Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
+* Linkbox [:page_facing_up:](https://rclone.org/linkbox)
 * Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
 * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
 * Mega [:page_facing_up:](https://rclone.org/mega/)
 * Memory [:page_facing_up:](https://rclone.org/memory/)
 * Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
+* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
 * Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
 * Minio [:page_facing_up:](https://rclone.org/s3/#minio)
 * Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
@@ -118,6 +122,7 @@ These backends adapt or modify other storage providers
 * Partial syncs supported on a whole file basis
 * [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
 * [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
+* [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
 * [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
 * Can sync to and from network, e.g. two different cloud accounts
 * Optional large file chunking ([Chunker](https://rclone.org/chunker/))
RELEASE.md: 53 changed lines
@@ -41,12 +41,15 @@ Early in the next release cycle update the dependencies

 * Review any pinned packages in go.mod and remove if possible
 * make updatedirect
+* make
+* make GOTAGS=cmount
+* make compiletest
 * git commit -a -v
 * make update
 * make
 * make GOTAGS=cmount
 * make compiletest
 * roll back any updates which didn't compile
 * git commit -a -v --amend
 * **NB** watch out for this changing the default go version in `go.mod`

 Note that `make update` updates all direct and indirect dependencies
 and there can occasionally be forwards compatibility problems with
@@ -90,6 +93,13 @@ Now
 * git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
 * git push

+## Sponsor logos
+
+If updating the website note that the sponsor logos have been moved out of the main repository.
+
+You will need to checkout `/docs/static/img/logos` from https://github.com/rclone/third-party-logos
+which is a private repo containing artwork from sponsors.
+
 ## Update the website between releases

 Create an update website branch based off the last release
@@ -114,32 +124,21 @@ Cherry pick any changes back to master and the stable branch if it is active.

 ## Making a manual build of docker

-The rclone docker image should autobuild on via GitHub actions. If it doesn't
-or needs to be updated then rebuild like this.
-
-See: https://github.com/ilteoood/docker_buildx/issues/19
-See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
+To do a basic build of rclone's docker image to debug builds locally:
+
+```
+docker buildx build --load -t rclone/rclone:testing --progress=plain .
+docker run --rm rclone/rclone:testing version
+```
+
+To test the multipatform build
+
+```
+docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
+```

 To make a full build then set the tags correctly and add `--push`

 ```
-git co v1.54.1
-docker pull golang
-export DOCKER_CLI_EXPERIMENTAL=enabled
-docker buildx create --name actions_builder --use
-docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
-docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
-SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
-echo "Supported platforms: $SUPPORTED_PLATFORMS"
 docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
-docker buildx stop actions_builder
 ```
-
-### Old build for linux/amd64 only
-
-```
-docker pull golang
-docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
-docker push rclone/rclone:1.52.0
-docker push rclone/rclone:1.52
-docker push rclone/rclone:1
-docker push rclone/rclone:latest
-```
@@ -4,8 +4,8 @@ package all
 import (
 // Active file systems
 _ "github.com/rclone/rclone/backend/alias"
-_ "github.com/rclone/rclone/backend/amazonclouddrive"
 _ "github.com/rclone/rclone/backend/azureblob"
+_ "github.com/rclone/rclone/backend/azurefiles"
 _ "github.com/rclone/rclone/backend/b2"
 _ "github.com/rclone/rclone/backend/box"
 _ "github.com/rclone/rclone/backend/cache"
@@ -24,9 +24,11 @@ import (
 _ "github.com/rclone/rclone/backend/hdfs"
 _ "github.com/rclone/rclone/backend/hidrive"
 _ "github.com/rclone/rclone/backend/http"
+_ "github.com/rclone/rclone/backend/imagekit"
 _ "github.com/rclone/rclone/backend/internetarchive"
 _ "github.com/rclone/rclone/backend/jottacloud"
 _ "github.com/rclone/rclone/backend/koofr"
+_ "github.com/rclone/rclone/backend/linkbox"
 _ "github.com/rclone/rclone/backend/local"
 _ "github.com/rclone/rclone/backend/mailru"
 _ "github.com/rclone/rclone/backend/mega"
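The import list above works through Go's blank-import side effects: each backend package registers itself from an `init` function when it is imported, so adding or removing a single `_` import is enough to add or remove a backend. A generic sketch of that pattern, using a hypothetical in-file registry rather than rclone's actual `fs.Register` API:

```go
package main

import "fmt"

// backends maps a backend name to its constructor; init functions fill it in.
var backends = map[string]func() string{}

// register is what each backend's init function would call.
func register(name string, factory func() string) {
	backends[name] = factory
}

// In rclone the equivalent registration lives in each backend package's init,
// which runs purely as a side effect of the blank import in backend/all.
func init() {
	register("example", func() string { return "example backend" })
}

func main() {
	for name := range backends {
		fmt.Println("registered:", name)
	}
}
```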
(One file's diff suppressed because it is too large.)
@@ -1,21 +0,0 @@
-// Test AmazonCloudDrive filesystem interface
-
-//go:build acd
-// +build acd
-
-package amazonclouddrive_test
-
-import (
-"testing"
-
-"github.com/rclone/rclone/backend/amazonclouddrive"
-"github.com/rclone/rclone/fs"
-"github.com/rclone/rclone/fstest/fstests"
-)
-
-// TestIntegration runs integration tests against the remote
-func TestIntegration(t *testing.T) {
-fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
-fstests.RemoteName = "TestAmazonCloudDrive:"
-fstests.Run(t)
-}
@@ -8,6 +8,7 @@ import (
 "context"
 "crypto/md5"
 "encoding/base64"
+"encoding/binary"
 "encoding/hex"
 "encoding/json"
 "errors"
@@ -295,10 +296,10 @@ avoid the time out.`,
 Advanced: true,
 }, {
 Name: "access_tier",
-Help: `Access tier of blob: hot, cool or archive.
+Help: `Access tier of blob: hot, cool, cold or archive.

-Archived blobs can be restored by setting access tier to hot or
-cool. Leave blank if you intend to use default access tier, which is
+Archived blobs can be restored by setting access tier to hot, cool or
+cold. Leave blank if you intend to use default access tier, which is
 set at account level

 If there is no "access tier" specified, rclone doesn't apply any tier.
@@ -306,7 +307,7 @@ rclone performs "Set Tier" operation on blobs while uploading, if objects
 are not modified, specifying "access tier" to new one will have no effect.
 If blobs are in "archive tier" at remote, trying to perform data transfer
 operations from remote will not be allowed. User should first restore by
-tiering blob to "Hot" or "Cool".`,
+tiering blob to "Hot", "Cool" or "Cold".`,
 Advanced: true,
 }, {
 Name: "archive_tier_delete",
@@ -401,6 +402,24 @@ rclone does if you know the container exists already.
 Help: `If set, do not do HEAD before GET when getting objects.`,
 Default: false,
 Advanced: true,
+}, {
+Name: "delete_snapshots",
+Help: `Set to specify how to deal with snapshots on blob deletion.`,
+Examples: []fs.OptionExample{
+{
+Value: "",
+Help: "By default, the delete operation fails if a blob has snapshots",
+}, {
+Value: string(blob.DeleteSnapshotsOptionTypeInclude),
+Help: "Specify 'include' to remove the root blob and all its snapshots",
+}, {
+Value: string(blob.DeleteSnapshotsOptionTypeOnly),
+Help: "Specify 'only' to remove only the snapshots but keep the root blob.",
+},
+},
+Default: "",
+Exclusive: true,
+Advanced: true,
 }},
 })
 }
@@ -437,6 +456,7 @@ type Options struct {
 DirectoryMarkers bool `config:"directory_markers"`
 NoCheckContainer bool `config:"no_check_container"`
 NoHeadObject bool `config:"no_head_object"`
+DeleteSnapshots string `config:"delete_snapshots"`
 }

 // Fs represents a remote azure server
@@ -520,6 +540,7 @@ func (o *Object) split() (container, containerPath string) {
 func validateAccessTier(tier string) bool {
 return strings.EqualFold(tier, string(blob.AccessTierHot)) ||
 strings.EqualFold(tier, string(blob.AccessTierCool)) ||
+strings.EqualFold(tier, string(blob.AccessTierCold)) ||
 strings.EqualFold(tier, string(blob.AccessTierArchive))
 }
@@ -649,8 +670,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 if opt.AccessTier == "" {
 opt.AccessTier = string(defaultAccessTier)
 } else if !validateAccessTier(opt.AccessTier) {
-return nil, fmt.Errorf("supported access tiers are %s, %s and %s",
-string(blob.AccessTierHot), string(blob.AccessTierCool), string(blob.AccessTierArchive))
+return nil, fmt.Errorf("supported access tiers are %s, %s, %s and %s",
+string(blob.AccessTierHot), string(blob.AccessTierCool), string(blob.AccessTierCold), string(blob.AccessTierArchive))
 }

 if !validatePublicAccess((opt.PublicAccess)) {
@@ -1899,7 +1920,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 var offset int64
 var count int64
 if o.AccessTier() == blob.AccessTierArchive {
-return nil, fmt.Errorf("blob in archive tier, you need to set tier to hot or cool first")
+return nil, fmt.Errorf("blob in archive tier, you need to set tier to hot, cool, cold first")
 }
 fs.FixRangeOption(options, o.size)
 for _, option := range options {
@@ -1965,34 +1986,21 @@ func (rs *readSeekCloser) Close() error {
 return nil
 }

-// increment the array as LSB binary
-func increment(xs *[8]byte) {
-for i, digit := range xs {
-newDigit := digit + 1
-xs[i] = newDigit
-if newDigit >= digit {
-// exit if no carry
-break
-}
-}
-}
-
 // record chunk number and id for Close
 type azBlock struct {
-chunkNumber int
+chunkNumber uint64
 id string
 }

 // Implements the fs.ChunkWriter interface
 type azChunkWriter struct {
-chunkSize int64
-size int64
-f *Fs
-ui uploadInfo
-blocksMu sync.Mutex // protects the below
-blocks []azBlock // list of blocks for finalize
-binaryBlockID [8]byte // block counter as LSB first 8 bytes
-o *Object
+chunkSize int64
+size int64
+f *Fs
+ui uploadInfo
+blocksMu sync.Mutex // protects the below
+blocks []azBlock // list of blocks for finalize
+o *Object
 }

 // OpenChunkWriter returns the chunk size and a ChunkWriter
@@ -2080,13 +2088,14 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 transactionalMD5 := md5sum[:]

 // increment the blockID and save the blocks for finalize
-increment(&w.binaryBlockID)
-blockID := base64.StdEncoding.EncodeToString(w.binaryBlockID[:])
+var binaryBlockID [8]byte // block counter as LSB first 8 bytes
+binary.LittleEndian.PutUint64(binaryBlockID[:], uint64(chunkNumber))
+blockID := base64.StdEncoding.EncodeToString(binaryBlockID[:])

 // Save the blockID for the commit
 w.blocksMu.Lock()
 w.blocks = append(w.blocks, azBlock{
-chunkNumber: chunkNumber,
+chunkNumber: uint64(chunkNumber),
 id: blockID,
 })
 w.blocksMu.Unlock()
@@ -2151,9 +2160,20 @@ func (w *azChunkWriter) Close(ctx context.Context) (err error) {
 return w.blocks[i].chunkNumber < w.blocks[j].chunkNumber
 })

-// Create a list of block IDs
+// Create and check a list of block IDs
 blockIDs := make([]string, len(w.blocks))
 for i := range w.blocks {
+if w.blocks[i].chunkNumber != uint64(i) {
+return fmt.Errorf("internal error: expecting chunkNumber %d but got %d", i, w.blocks[i].chunkNumber)
+}
+chunkBytes, err := base64.StdEncoding.DecodeString(w.blocks[i].id)
+if err != nil {
+return fmt.Errorf("internal error: bad block ID: %w", err)
+}
+chunkNumber := binary.LittleEndian.Uint64(chunkBytes)
+if w.blocks[i].chunkNumber != chunkNumber {
+return fmt.Errorf("internal error: expecting decoded chunkNumber %d but got %d", w.blocks[i].chunkNumber, chunkNumber)
+}
 blockIDs[i] = w.blocks[i].id
 }
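The block-ID change above derives each Azure block ID deterministically from the chunk number instead of advancing a shared per-writer counter, so retried or out-of-order chunk uploads always reproduce the same ID for the same chunk, and the commit in Close can verify every ID by decoding it back. A minimal standalone sketch of that encode/decode round trip, using only the standard library and not the rclone code itself:

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// blockID encodes a chunk number as 8 little-endian bytes, then base64
// encodes them, giving every chunk a stable, unique block ID.
func blockID(chunkNumber uint64) string {
	var raw [8]byte
	binary.LittleEndian.PutUint64(raw[:], chunkNumber)
	return base64.StdEncoding.EncodeToString(raw[:])
}

// chunkNumberOf reverses blockID, which is the kind of check the Close
// code above performs before committing the block list.
func chunkNumberOf(id string) (uint64, error) {
	raw, err := base64.StdEncoding.DecodeString(id)
	if err != nil || len(raw) != 8 {
		return 0, fmt.Errorf("bad block ID %q: %v", id, err)
	}
	return binary.LittleEndian.Uint64(raw), nil
}

func main() {
	for _, n := range []uint64{0, 1, 42} {
		id := blockID(n)
		back, _ := chunkNumberOf(id)
		fmt.Printf("chunk %d -> %s -> %d\n", n, id, back)
	}
}
```

Because the ID depends only on the chunk number, the previous mutable counter (and its `increment` helper, removed in the hunk above) is no longer needed.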
@@ -2355,9 +2375,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
blb := o.getBlobSVC()
|
||||
//only := blob.DeleteSnapshotsOptionTypeOnly
|
||||
opt := blob.DeleteOptions{
|
||||
//DeleteSnapshots: &only,
|
||||
opt := blob.DeleteOptions{}
|
||||
if o.fs.opt.DeleteSnapshots != "" {
|
||||
action := blob.DeleteSnapshotsOptionType(o.fs.opt.DeleteSnapshots)
|
||||
opt.DeleteSnapshots = &action
|
||||
}
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blb.Delete(ctx, &opt)
|
||||
|
||||
@@ -17,21 +17,3 @@ func (f *Fs) InternalTest(t *testing.T) {
|
||||
enabled = f.Features().GetTier
|
||||
assert.True(t, enabled)
|
||||
}
|
||||
|
||||
func TestIncrement(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in [8]byte
|
||||
want [8]byte
|
||||
}{
|
||||
{[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
|
||||
{[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
|
||||
{[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
|
||||
{[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
|
||||
{[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
|
||||
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
|
||||
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
|
||||
} {
|
||||
increment(&test.in)
|
||||
assert.Equal(t, test.want, test.in)
|
||||
}
|
||||
}
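
The increment helper exercised by these cases is only partially visible earlier in the diff. A self-contained sketch consistent with both the visible tail and the expected outputs above (an LSB-first counter with carry over an 8-byte array); treat it as an illustrative reconstruction rather than the verbatim backend code:

// increment adds one to the counter held in xs, least significant byte first,
// carrying into the next byte on overflow and wrapping silently if all bytes overflow.
func increment(xs *[8]byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			// exit if no carry
			break
		}
	}
}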
|
||||
|
||||
@@ -19,7 +19,7 @@ func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestAzureBlob:",
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"Hot", "Cool"},
|
||||
TiersToTest: []string{"Hot", "Cool", "Cold"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: defaultChunkSize,
|
||||
},
|
||||
@@ -35,7 +35,7 @@ func TestIntegration2(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"Hot", "Cool"},
|
||||
TiersToTest: []string{"Hot", "Cool", "Cold"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: defaultChunkSize,
|
||||
},
|
||||
@@ -62,6 +62,7 @@ func TestValidateAccessTier(t *testing.T) {
|
||||
"HOT": {"HOT", true},
|
||||
"Hot": {"Hot", true},
|
||||
"cool": {"cool", true},
|
||||
"cold": {"cold", true},
|
||||
"archive": {"archive", true},
|
||||
"empty": {"", false},
|
||||
"unknown": {"unknown", false},
|
||||
|
||||
1367 backend/azurefiles/azurefiles.go (new file; diff suppressed because it is too large)

70 backend/azurefiles/azurefiles_internal_test.go (new file)
@@ -0,0 +1,70 @@
|
||||
//go:build !plan9 && !js
|
||||
// +build !plan9,!js
|
||||
|
||||
package azurefiles
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("Authentication", f.InternalTestAuth)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
func (f *Fs) InternalTestAuth(t *testing.T) {
|
||||
t.Skip("skipping since this requires authentication credentials which are not part of repo")
|
||||
shareName := "test-rclone-oct-2023"
|
||||
testCases := []struct {
|
||||
name string
|
||||
options *Options
|
||||
}{
|
||||
{
|
||||
name: "ConnectionString",
|
||||
options: &Options{
|
||||
ShareName: shareName,
|
||||
ConnectionString: "",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "AccountAndKey",
|
||||
options: &Options{
|
||||
ShareName: shareName,
|
||||
Account: "",
|
||||
Key: "",
|
||||
}},
|
||||
{
|
||||
name: "SASUrl",
|
||||
options: &Options{
|
||||
ShareName: shareName,
|
||||
SASURL: "",
|
||||
}},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options)
|
||||
assert.NoError(t, err)
|
||||
dirName := randomString(10)
|
||||
assert.NoError(t, fs.Mkdir(context.TODO(), dirName))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
|
||||
|
||||
func randomString(charCount int) string {
|
||||
strBldr := strings.Builder{}
|
||||
for i := 0; i < charCount; i++ {
|
||||
randPos := rand.Int63n(52)
|
||||
strBldr.WriteByte(chars[randPos])
|
||||
}
|
||||
return strBldr.String()
|
||||
}
|
||||
18 backend/azurefiles/azurefiles_test.go (new file)
@@ -0,0 +1,18 @@
|
||||
//go:build !plan9 && !js
|
||||
// +build !plan9,!js
|
||||
|
||||
package azurefiles
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestIntegration(t *testing.T) {
|
||||
var objPtr *Object
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestAzureFiles:",
|
||||
NilObject: objPtr,
|
||||
})
|
||||
}
|
||||
7 backend/azurefiles/azurefiles_unsupported.go (new file)
@@ -0,0 +1,7 @@
|
||||
// Build for azurefiles for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
//go:build plan9 || js
|
||||
// +build plan9 js
|
||||
|
||||
package azurefiles
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
gohash "hash"
|
||||
@@ -399,11 +400,18 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
|
||||
|
||||
// errorHandler parses a non 2xx error response into an error
|
||||
func errorHandler(resp *http.Response) error {
|
||||
// Decode error response
|
||||
errResponse := new(api.Error)
|
||||
err := rest.DecodeJSON(resp, &errResponse)
|
||||
body, err := rest.ReadBody(resp)
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Couldn't decode error response: %v", err)
|
||||
fs.Errorf(nil, "Couldn't read error out of body: %v", err)
|
||||
body = nil
|
||||
}
|
||||
// Decode error response if there was one - they can be blank
|
||||
errResponse := new(api.Error)
|
||||
if len(body) > 0 {
|
||||
err = json.Unmarshal(body, errResponse)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Couldn't decode error response: %v", err)
|
||||
}
|
||||
}
|
||||
if errResponse.Code == "" {
|
||||
errResponse.Code = "unknown"
|
||||
@@ -447,6 +455,14 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// setRoot changes the root of the Fs
|
||||
func (f *Fs) setRoot(root string) {
|
||||
f.root = parsePath(root)
|
||||
@@ -497,10 +513,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
f.setRoot(root)
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
BucketBasedRootOK: true,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
BucketBasedRootOK: true,
|
||||
ChunkWriterDoesntSeek: true,
|
||||
}).Fill(ctx, f)
|
||||
// Set the test flag if required
|
||||
if opt.TestMode != "" {
|
||||
@@ -1321,7 +1338,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
// If newInfo is nil then the metadata will be copied otherwise it
|
||||
// will be replaced with newInfo
|
||||
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
|
||||
if srcObj.size >= int64(f.opt.CopyCutoff) {
|
||||
if srcObj.size > int64(f.opt.CopyCutoff) {
|
||||
if newInfo == nil {
|
||||
newInfo, err = srcObj.getMetaData(ctx)
|
||||
if err != nil {
|
||||
@@ -1332,7 +1349,11 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return up.Copy(ctx)
|
||||
err = up.Copy(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dstObj.decodeMetaDataFileInfo(up.info)
|
||||
}
|
||||
|
||||
dstBucket, dstPath := dstObj.split()
|
||||
@@ -1919,7 +1940,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
// NB Stream returns the buffer and token
|
||||
return up.Stream(ctx, rw)
|
||||
err = up.Stream(ctx, rw)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return o.decodeMetaDataFileInfo(up.info)
|
||||
} else if err == io.EOF {
|
||||
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
|
||||
defer o.fs.putRW(rw)
|
||||
@@ -2063,7 +2088,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
remote: remote,
|
||||
}
|
||||
|
||||
bucket, _ := o.split()
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// Test b2 string encoding
|
||||
@@ -168,3 +169,10 @@ func TestParseTimeString(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
// Internal tests go here
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
@@ -28,7 +28,12 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setCopyCutoff(cs)
|
||||
}
|
||||
|
||||
var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||
_ fstests.SetCopyCutoffer = (*Fs)(nil)
|
||||
)
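
Presumably the fstests harness drives these setters to force small cutoffs so the multipart code paths get exercised even with tiny test objects. A hedged sketch of how SetCopyCutoff might be used from a test; the helper name and restore pattern are illustrative, only the setter signature comes from this diff:

// forceMultipartCopy lowers the copy cutoff so even small objects take the
// server-side multipart copy path, and returns a func to restore the old value.
func forceMultipartCopy(t *testing.T, f *Fs, cutoff fs.SizeSuffix) func() {
	old, err := f.SetCopyCutoff(cutoff)
	if err != nil {
		t.Fatalf("SetCopyCutoff: %v", err)
	}
	return func() {
		_, _ = f.SetCopyCutoff(old) // best effort restore
	}
}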
|
||||
|
||||
@@ -393,10 +393,11 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW)
|
||||
hasMoreParts = true
|
||||
)
|
||||
up.size = initialUploadBlock.Size()
|
||||
up.parts = 0
|
||||
for part := 0; hasMoreParts; part++ {
|
||||
// Get a block of memory from the pool and token which limits concurrency.
|
||||
var rw *pool.RW
|
||||
if part == 1 {
|
||||
if part == 0 {
|
||||
rw = initialUploadBlock
|
||||
} else {
|
||||
rw = up.f.getRW(false)
|
||||
@@ -411,12 +412,18 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW)
|
||||
|
||||
// Read the chunk
|
||||
var n int64
|
||||
if part == 1 {
|
||||
if part == 0 {
|
||||
n = rw.Size()
|
||||
} else {
|
||||
n, err = io.CopyN(rw, up.in, up.chunkSize)
|
||||
if err == io.EOF {
|
||||
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
|
||||
if n == 0 {
|
||||
fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.")
|
||||
up.f.putRW(rw)
|
||||
break
|
||||
} else {
|
||||
fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n)
|
||||
}
|
||||
hasMoreParts = false
|
||||
} else if err != nil {
|
||||
// other kinds of errors indicate failure
|
||||
@@ -426,7 +433,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW)
|
||||
}
|
||||
|
||||
// Keep stats up to date
|
||||
up.parts = part
|
||||
up.parts += 1
|
||||
up.size += n
|
||||
if part > maxParts {
|
||||
up.f.putRW(rw)
|
||||
@@ -456,7 +463,7 @@ func (up *largeUpload) Copy(ctx context.Context) (err error) {
|
||||
remaining = up.size
|
||||
)
|
||||
g.SetLimit(up.f.opt.UploadConcurrency)
|
||||
for part := 0; part <= up.parts; part++ {
|
||||
for part := 0; part < up.parts; part++ {
|
||||
// Fail fast, in case an errgroup managed function returns an error
|
||||
// gCtx is cancelled. There is no point in copying all the other parts.
|
||||
if gCtx.Err() != nil {
|
||||
|
||||
@@ -167,19 +167,7 @@ type PreUploadCheckResponse struct {
|
||||
// PreUploadCheckConflict is returned in the ContextInfo error field
|
||||
// from PreUploadCheck when the error code is "item_name_in_use"
|
||||
type PreUploadCheckConflict struct {
|
||||
Conflicts struct {
|
||||
Type string `json:"type"`
|
||||
ID string `json:"id"`
|
||||
FileVersion struct {
|
||||
Type string `json:"type"`
|
||||
ID string `json:"id"`
|
||||
Sha1 string `json:"sha1"`
|
||||
} `json:"file_version"`
|
||||
SequenceID string `json:"sequence_id"`
|
||||
Etag string `json:"etag"`
|
||||
Sha1 string `json:"sha1"`
|
||||
Name string `json:"name"`
|
||||
} `json:"conflicts"`
|
||||
Conflicts ItemMini `json:"conflicts"`
|
||||
}
|
||||
|
||||
// UpdateFileModTime is used in Update File Info
|
||||
|
||||
@@ -380,7 +380,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
||||
|
||||
// readMetaDataForPath reads the metadata from the path
|
||||
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
|
||||
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
|
||||
// defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
|
||||
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
|
||||
if err != nil {
|
||||
if err == fs.ErrorDirNotFound {
|
||||
@@ -389,20 +389,30 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
|
||||
return nil, err
|
||||
}
|
||||
|
||||
found, err := f.listAll(ctx, directoryID, false, true, true, func(item *api.Item) bool {
|
||||
if strings.EqualFold(item.Name, leaf) {
|
||||
info = item
|
||||
return true
|
||||
}
|
||||
return false
|
||||
// Use preupload to find the ID
|
||||
itemMini, err := f.preUploadCheck(ctx, leaf, directoryID, -1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if itemMini == nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
// Now we have the ID we can look up the object proper
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/files/" + itemMini.ID,
|
||||
Parameters: fieldsValue(),
|
||||
}
|
||||
var item api.Item
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.CallJSON(ctx, &opts, nil, &item)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !found {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return info, nil
|
||||
return &item, nil
|
||||
}
|
||||
|
||||
// errorHandler parses a non 2xx error response into an error
|
||||
@@ -762,7 +772,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
|
||||
//
|
||||
// It returns "", nil if the file is good to go
|
||||
// It returns "ID", nil if the file must be updated
|
||||
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
|
||||
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (item *api.ItemMini, err error) {
|
||||
check := api.PreUploadCheck{
|
||||
Name: f.opt.Enc.FromStandardName(leaf),
|
||||
Parent: api.Parent{
|
||||
@@ -787,16 +797,16 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
|
||||
var conflict api.PreUploadCheckConflict
|
||||
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
|
||||
return nil, fmt.Errorf("pre-upload check: JSON decode failed: %w", err)
|
||||
}
|
||||
if conflict.Conflicts.Type != api.ItemTypeFile {
|
||||
return "", fmt.Errorf("pre-upload check: can't overwrite non file with file: %w", err)
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
return conflict.Conflicts.ID, nil
|
||||
return &conflict.Conflicts, nil
|
||||
}
|
||||
return "", fmt.Errorf("pre-upload check: %w", err)
|
||||
return nil, fmt.Errorf("pre-upload check: %w", err)
|
||||
}
|
||||
return "", nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Put the object
|
||||
@@ -817,11 +827,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
|
||||
// Preflight check the upload, which returns the ID if the
|
||||
// object already exists
|
||||
ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
|
||||
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ID == "" {
|
||||
if item == nil {
|
||||
return f.PutUnchecked(ctx, in, src, options...)
|
||||
}
|
||||
|
||||
@@ -829,7 +839,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
id: ID,
|
||||
id: item.ID,
|
||||
}
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
}
|
||||
@@ -1197,6 +1207,12 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
// Shutdown shuts down the fs
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
f.tokenRenewer.Shutdown()
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path that has had changes.
|
||||
// If the implementation uses polling, it should adhere to the given interval.
|
||||
//
|
||||
@@ -1709,6 +1725,7 @@ var (
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -325,6 +325,14 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
}
|
||||
}
|
||||
|
||||
// Correct root if definitely pointing to a file
|
||||
if err == fs.ErrorIsFile {
|
||||
f.root = path.Dir(f.root)
|
||||
if f.root == "." || f.root == "/" {
|
||||
f.root = ""
|
||||
}
|
||||
}
|
||||
|
||||
// Note 1: the features here are ones we could support, and they are
|
||||
// ANDed with the ones from wrappedFs.
|
||||
// Note 2: features.Fill() points features.PutStream to our PutStream,
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -172,6 +173,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
opt: *opt,
|
||||
mode: compressionModeFromName(opt.CompressionMode),
|
||||
}
|
||||
// Correct root if definitely pointing to a file
|
||||
if err == fs.ErrorIsFile {
|
||||
f.root = path.Dir(f.root)
|
||||
if f.root == "." || f.root == "/" {
|
||||
f.root = ""
|
||||
}
|
||||
}
|
||||
// the features here are ones we could support, and they are
|
||||
// ANDed with the ones from wrappedFs
|
||||
f.features = (&fs.Features{
|
||||
|
||||
@@ -253,6 +253,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
cipher: cipher,
|
||||
}
|
||||
cache.PinUntilFinalized(f.Fs, f)
|
||||
// Correct root if definitely pointing to a file
|
||||
if err == fs.ErrorIsFile {
|
||||
f.root = path.Dir(f.root)
|
||||
if f.root == "." || f.root == "/" {
|
||||
f.root = ""
|
||||
}
|
||||
}
|
||||
// the features here are ones we could support, and they are
|
||||
// ANDed with the ones from wrappedFs
|
||||
f.features = (&fs.Features{
|
||||
|
||||
@@ -47,6 +47,7 @@ import (
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"golang.org/x/time/rate"
|
||||
drive_v2 "google.golang.org/api/drive/v2"
|
||||
drive "google.golang.org/api/drive/v3"
|
||||
"google.golang.org/api/googleapi"
|
||||
@@ -69,12 +70,14 @@ const (
|
||||
defaultScope = "drive"
|
||||
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
|
||||
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
||||
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
|
||||
defaultChunkSize = 8 * fs.Mebi
|
||||
partialFields = "id,name,size,md5Checksum,sha1Checksum,sha256Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
|
||||
listRGrouping = 50 // number of IDs to search at once when using ListR
|
||||
listRInputBuffer = 1000 // size of input buffer when using ListR
|
||||
defaultXDGIcon = "text-html"
|
||||
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
|
||||
defaultChunkSize = 8 * fs.Mebi
|
||||
partialFields = "id,name,size,md5Checksum,sha1Checksum,sha256Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
|
||||
listRGrouping = 50 // number of IDs to search at once when using ListR
|
||||
listRInputBuffer = 1000 // size of input buffer when using ListR
|
||||
defaultXDGIcon = "text-html"
|
||||
uploadsPerSecond = 3.0 // default number of uploads per second
|
||||
uploadsPerSecondBurst = 3 // burst for the above
|
||||
)
|
||||
|
||||
// Globals
|
||||
@@ -143,6 +146,41 @@ var (
|
||||
_linkTemplates map[string]*template.Template // available link types
|
||||
)
|
||||
|
||||
// rwChoices type for fs.Bits
|
||||
type rwChoices struct{}
|
||||
|
||||
func (rwChoices) Choices() []fs.BitsChoicesInfo {
|
||||
return []fs.BitsChoicesInfo{
|
||||
{Bit: uint64(rwOff), Name: "off"},
|
||||
{Bit: uint64(rwRead), Name: "read"},
|
||||
{Bit: uint64(rwWrite), Name: "write"},
|
||||
}
|
||||
}
|
||||
|
||||
// rwChoice type alias
|
||||
type rwChoice = fs.Bits[rwChoices]
|
||||
|
||||
const (
|
||||
rwRead rwChoice = 1 << iota
|
||||
rwWrite
|
||||
rwOff rwChoice = 0
|
||||
)
|
||||
|
||||
// Examples for the options
|
||||
var rwExamples = fs.OptionExamples{{
|
||||
Value: rwOff.String(),
|
||||
Help: "Do not read or write the value",
|
||||
}, {
|
||||
Value: rwRead.String(),
|
||||
Help: "Read the value only",
|
||||
}, {
|
||||
Value: rwWrite.String(),
|
||||
Help: "Write the value only",
|
||||
}, {
|
||||
Value: (rwRead | rwWrite).String(),
|
||||
Help: "Read and Write the value.",
|
||||
}}
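
Because rwChoice is a bitmask, each direction of an option like metadata_permissions can be checked independently. A small fragment assuming the surrounding package, using only operations that appear elsewhere in this diff (bitwise OR, IsSet, String):

mode := rwRead | rwWrite // matches the "read,write" style example above
if mode.IsSet(rwRead) {
	// metadata for this facet will be read from Drive
}
if mode.IsSet(rwWrite) {
	// ... and written back when uploading
}
fmt.Println(rwOff.String(), mode.String()) // expected to print the option names, e.g. "off" and "read,write"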
|
||||
|
||||
// Parse the scopes option returning a slice of scopes
|
||||
func driveScopes(scopesString string) (scopes []string) {
|
||||
if scopesString == "" {
|
||||
@@ -250,9 +288,13 @@ func init() {
|
||||
}
|
||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||
},
|
||||
MetadataInfo: &fs.MetadataInfo{
|
||||
System: systemMetadataInfo,
|
||||
Help: `User metadata is stored in the properties field of the drive object.`,
|
||||
},
|
||||
Options: append(driveOAuthOptions(), []fs.Option{{
|
||||
Name: "scope",
|
||||
Help: "Scope that rclone should use when requesting access from drive.",
|
||||
Help: "Comma separated list of scopes that rclone should use when requesting access from drive.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "drive",
|
||||
Help: "Full access all files, excluding Application Data Folder.",
|
||||
@@ -320,6 +362,25 @@ rather than shortcuts themselves when doing server side copies.`,
|
||||
Default: false,
|
||||
Help: "Skip google documents in all listings.\n\nIf given, gdocs practically become invisible to rclone.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "show_all_gdocs",
|
||||
Default: false,
|
||||
Help: `Show all Google Docs including non-exportable ones in listings.
|
||||
|
||||
If you try a server side copy on a Google Form without this flag, you
|
||||
will get this error:
|
||||
|
||||
No export formats found for "application/vnd.google-apps.form"
|
||||
|
||||
However adding this flag will allow the form to be server side copied.
|
||||
|
||||
Note that rclone doesn't add extensions to the Google Docs file names
|
||||
in this mode.
|
||||
|
||||
Do **not** use this flag when trying to download Google Docs - rclone
|
||||
will fail to download them.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_checksum_gphotos",
|
||||
Default: false,
|
||||
@@ -500,6 +561,16 @@ need to use --ignore size also.`,
|
||||
Default: defaultBurst,
|
||||
Help: "Number of API calls to allow without sleeping.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "uploads_per_second",
|
||||
Default: uploadsPerSecond,
|
||||
Help: "Number of uploads per second limit.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "uploads_per_second_burst",
|
||||
Default: uploadsPerSecondBurst,
|
||||
Help: "Burst for number of uploads per second limit.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "server_side_across_configs",
|
||||
Default: false,
|
||||
@@ -620,6 +691,56 @@ having trouble with like many empty directories.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: true,
|
||||
}, {
|
||||
Name: "metadata_owner",
|
||||
Help: `Control whether owner should be read or written in metadata.
|
||||
|
||||
Owner is a standard part of the file metadata so is easy to read. But it
|
||||
isn't always desirable to set the owner from the metadata.
|
||||
|
||||
Note that you can't set the owner on Shared Drives, and that setting
|
||||
ownership will generate an email to the new owner (this can't be
|
||||
disabled), and you can't transfer ownership to someone outside your
|
||||
organization.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: rwRead,
|
||||
Examples: rwExamples,
|
||||
}, {
|
||||
Name: "metadata_permissions",
|
||||
Help: `Control whether permissions should be read or written in metadata.
|
||||
|
||||
Reading permissions metadata from files can be done quickly, but it
|
||||
isn't always desirable to set the permissions from the metadata.
|
||||
|
||||
Note that rclone drops any inherited permissions on Shared Drives and
|
||||
any owner permission on My Drives as these are duplicated in the owner
|
||||
metadata.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: rwOff,
|
||||
Examples: rwExamples,
|
||||
}, {
|
||||
Name: "metadata_labels",
|
||||
Help: `Control whether labels should be read or written in metadata.
|
||||
|
||||
Reading labels metadata from files takes an extra API transaction and
|
||||
will slow down listings. It isn't always desirable to set the labels
|
||||
from the metadata.
|
||||
|
||||
The format of labels is documented in the drive API documentation at
|
||||
https://developers.google.com/drive/api/reference/rest/v3/Label -
|
||||
rclone just provides a JSON dump of this format.
|
||||
|
||||
When setting labels, the label and fields must already exist - rclone
|
||||
will not create them. This means that if you are transferring labels
|
||||
from two different accounts you will have to create the labels in
|
||||
advance and use the metadata mapper to translate the IDs between the
|
||||
two accounts.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: rwOff,
|
||||
Examples: rwExamples,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -667,6 +788,7 @@ type Options struct {
|
||||
UseTrash bool `config:"use_trash"`
|
||||
CopyShortcutContent bool `config:"copy_shortcut_content"`
|
||||
SkipGdocs bool `config:"skip_gdocs"`
|
||||
ShowAllGdocs bool `config:"show_all_gdocs"`
|
||||
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
|
||||
SharedWithMe bool `config:"shared_with_me"`
|
||||
TrashedOnly bool `config:"trashed_only"`
|
||||
@@ -687,6 +809,8 @@ type Options struct {
|
||||
V2DownloadMinSize fs.SizeSuffix `config:"v2_download_min_size"`
|
||||
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
|
||||
PacerBurst int `config:"pacer_burst"`
|
||||
UploadsPerSecond float64 `config:"uploads_per_second"`
|
||||
UploadsPerSecondBurst int `config:"uploads_per_second_burst"`
|
||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||
DisableHTTP2 bool `config:"disable_http2"`
|
||||
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
|
||||
@@ -695,6 +819,9 @@ type Options struct {
|
||||
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
|
||||
ResourceKey string `config:"resource_key"`
|
||||
FastListBugFix bool `config:"fast_list_bug_fix"`
|
||||
MetadataOwner rwChoice `config:"metadata_owner"`
|
||||
MetadataPermissions rwChoice `config:"metadata_permissions"`
|
||||
MetadataLabels rwChoice `config:"metadata_labels"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
}
|
||||
@@ -716,23 +843,26 @@ type Fs struct {
|
||||
exportExtensions []string // preferred extensions to download docs
|
||||
importMimeTypes []string // MIME types to convert to docs
|
||||
isTeamDrive bool // true if this is a team drive
|
||||
fileFields googleapi.Field // fields to fetch file info with
|
||||
m configmap.Mapper
|
||||
grouping int32 // number of IDs to search at once in ListR - read with atomic
|
||||
listRmu *sync.Mutex // protects listRempties
|
||||
listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
|
||||
dirResourceKeys *sync.Map // map directory ID to resource key
|
||||
grouping int32 // number of IDs to search at once in ListR - read with atomic
|
||||
listRmu *sync.Mutex // protects listRempties
|
||||
listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
|
||||
dirResourceKeys *sync.Map // map directory ID to resource key
|
||||
permissionsMu *sync.Mutex // protect the below
|
||||
permissions map[string]*drive.Permission // map permission IDs to Permissions
|
||||
uploadsLimiter *rate.Limiter // rate limit uploads
|
||||
}
|
||||
|
||||
type baseObject struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
id string // Drive Id of this object
|
||||
modifiedDate string // RFC3339 time it was last modified
|
||||
mimeType string // The object MIME type
|
||||
bytes int64 // size of the object
|
||||
parents []string // IDs of the parent directories
|
||||
resourceKey *string // resourceKey is needed for link shared objects
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
id string // Drive Id of this object
|
||||
modifiedDate string // RFC3339 time it was last modified
|
||||
mimeType string // The object MIME type
|
||||
bytes int64 // size of the object
|
||||
parents []string // IDs of the parent directories
|
||||
resourceKey *string // resourceKey is needed for link shared objects
|
||||
metadata *fs.Metadata // metadata if known
|
||||
}
|
||||
type documentObject struct {
|
||||
baseObject
|
||||
@@ -981,7 +1111,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
|
||||
list.Header().Add("X-Goog-Drive-Resource-Keys", resourceKeysHeader)
|
||||
}
|
||||
|
||||
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)
|
||||
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx))
|
||||
|
||||
OUTER:
|
||||
for {
|
||||
@@ -1255,9 +1385,11 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
|
||||
listRmu: new(sync.Mutex),
|
||||
listRempties: make(map[string]struct{}),
|
||||
dirResourceKeys: new(sync.Map),
|
||||
permissionsMu: new(sync.Mutex),
|
||||
permissions: make(map[string]*drive.Permission),
|
||||
uploadsLimiter: rate.NewLimiter(rate.Limit(opt.UploadsPerSecond), opt.UploadsPerSecondBurst),
|
||||
}
|
||||
f.isTeamDrive = opt.TeamDriveID != ""
|
||||
f.fileFields = f.getFileFields()
|
||||
f.features = (&fs.Features{
|
||||
DuplicateFiles: true,
|
||||
ReadMimeType: true,
|
||||
@@ -1265,6 +1397,9 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
|
||||
CanHaveEmptyDirectories: true,
|
||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||
FilterAware: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
// Create a new authorized Drive client.
|
||||
@@ -1369,7 +1504,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
|
||||
func (f *Fs) newBaseObject(ctx context.Context, remote string, info *drive.File) (o baseObject, err error) {
|
||||
modifiedDate := info.ModifiedTime
|
||||
if f.opt.UseCreatedDate {
|
||||
modifiedDate = info.CreatedTime
|
||||
@@ -1380,7 +1515,7 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
|
||||
if f.opt.SizeAsQuota {
|
||||
size = info.QuotaBytesUsed
|
||||
}
|
||||
return baseObject{
|
||||
o = baseObject{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
id: info.Id,
|
||||
@@ -1389,10 +1524,15 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
|
||||
bytes: size,
|
||||
parents: info.Parents,
|
||||
}
|
||||
err = nil
|
||||
if fs.GetConfig(ctx).Metadata {
|
||||
err = o.parseMetadata(ctx, info)
|
||||
}
|
||||
return o, err
|
||||
}
|
||||
|
||||
// getFileFields gets the fields for a normal file Get or List
|
||||
func (f *Fs) getFileFields() (fields googleapi.Field) {
|
||||
func (f *Fs) getFileFields(ctx context.Context) (fields googleapi.Field) {
|
||||
fields = partialFields
|
||||
if f.opt.AuthOwnerOnly {
|
||||
fields += ",owners"
|
||||
@@ -1406,11 +1546,14 @@ func (f *Fs) getFileFields() (fields googleapi.Field) {
|
||||
if f.opt.SizeAsQuota {
|
||||
fields += ",quotaBytesUsed"
|
||||
}
|
||||
if fs.GetConfig(ctx).Metadata {
|
||||
fields += "," + metadataFields
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
// newRegularObject creates an fs.Object for a normal drive.File
|
||||
func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
|
||||
func (f *Fs) newRegularObject(ctx context.Context, remote string, info *drive.File) (obj fs.Object, err error) {
|
||||
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
|
||||
if f.opt.SkipChecksumGphotos {
|
||||
for _, space := range info.Spaces {
|
||||
@@ -1423,27 +1566,33 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
|
||||
}
|
||||
}
|
||||
o := &Object{
|
||||
baseObject: f.newBaseObject(remote, info),
|
||||
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
|
||||
md5sum: strings.ToLower(info.Md5Checksum),
|
||||
sha1sum: strings.ToLower(info.Sha1Checksum),
|
||||
sha256sum: strings.ToLower(info.Sha256Checksum),
|
||||
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
|
||||
}
|
||||
o.baseObject, err = f.newBaseObject(ctx, remote, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if info.ResourceKey != "" {
|
||||
o.resourceKey = &info.ResourceKey
|
||||
}
|
||||
return o
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// newDocumentObject creates an fs.Object for a google docs drive.File
|
||||
func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
|
||||
func (f *Fs) newDocumentObject(ctx context.Context, remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
|
||||
mediaType, _, err := mime.ParseMediaType(exportMimeType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
url := info.ExportLinks[mediaType]
|
||||
baseObject := f.newBaseObject(remote+extension, info)
|
||||
baseObject, err := f.newBaseObject(ctx, remote+extension, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
baseObject.bytes = -1
|
||||
baseObject.mimeType = exportMimeType
|
||||
return &documentObject{
|
||||
@@ -1455,7 +1604,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
|
||||
}
|
||||
|
||||
// newLinkObject creates an fs.Object that represents a link a google docs drive.File
|
||||
func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
|
||||
func (f *Fs) newLinkObject(ctx context.Context, remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
|
||||
t := linkTemplate(exportMimeType)
|
||||
if t == nil {
|
||||
return nil, fmt.Errorf("unsupported link type %s", exportMimeType)
|
||||
@@ -1474,7 +1623,10 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
|
||||
return nil, fmt.Errorf("executing template failed: %w", err)
|
||||
}
|
||||
|
||||
baseObject := f.newBaseObject(remote+extension, info)
|
||||
baseObject, err := f.newBaseObject(ctx, remote+extension, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
baseObject.bytes = int64(buf.Len())
|
||||
baseObject.mimeType = exportMimeType
|
||||
return &linkObject{
|
||||
@@ -1490,7 +1642,7 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
|
||||
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
|
||||
// If item has MD5 sum it is a file stored on drive
|
||||
if info.Md5Checksum != "" {
|
||||
return f.newRegularObject(remote, info), nil
|
||||
return f.newRegularObject(ctx, remote, info)
|
||||
}
|
||||
|
||||
extension, exportName, exportMimeType, isDocument := f.findExportFormat(ctx, info)
|
||||
@@ -1521,13 +1673,15 @@ func (f *Fs) newObjectWithExportInfo(
|
||||
case info.MimeType == shortcutMimeTypeDangling:
|
||||
// Pretend a dangling shortcut is a regular object
|
||||
// It will error if used, but appear in listings so it can be deleted
|
||||
return f.newRegularObject(remote, info), nil
|
||||
return f.newRegularObject(ctx, remote, info)
|
||||
case info.Md5Checksum != "":
|
||||
// If item has MD5 sum it is a file stored on drive
|
||||
return f.newRegularObject(remote, info), nil
|
||||
return f.newRegularObject(ctx, remote, info)
|
||||
case f.opt.SkipGdocs:
|
||||
fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
case f.opt.ShowAllGdocs:
|
||||
return f.newDocumentObject(ctx, remote, info, "", info.MimeType)
|
||||
default:
|
||||
// If item MimeType is in the ExportFormats then it is a google doc
|
||||
if !isDocument {
|
||||
@@ -1539,9 +1693,9 @@ func (f *Fs) newObjectWithExportInfo(
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
if isLinkMimeType(exportMimeType) {
|
||||
return f.newLinkObject(remote, info, extension, exportMimeType)
|
||||
return f.newLinkObject(ctx, remote, info, extension, exportMimeType)
|
||||
}
|
||||
return f.newDocumentObject(remote, info, extension, exportMimeType)
|
||||
return f.newDocumentObject(ctx, remote, info, extension, exportMimeType)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2169,7 +2323,7 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
|
||||
fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
|
||||
return item, nil
|
||||
}
|
||||
newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
|
||||
newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.getFileFields(ctx))
|
||||
if err != nil {
|
||||
var gerr *googleapi.Error
|
||||
if errors.As(err, &gerr) && gerr.Code == 404 {
|
||||
@@ -2301,12 +2455,17 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
||||
} else {
|
||||
createInfo.MimeType = fs.MimeTypeFromName(remote)
|
||||
}
|
||||
updateMetadata, err := f.fetchAndUpdateMetadata(ctx, src, options, createInfo, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var info *drive.File
|
||||
if size >= 0 && size < int64(f.opt.UploadCutoff) {
|
||||
// Make the API request to upload metadata and file data.
|
||||
// Don't retry, return a retry error instead
|
||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||
_ = f.uploadsLimiter.Wait(ctx) // obey upslimit
|
||||
info, err = f.svc.Files.Create(createInfo).
|
||||
Media(in, googleapi.ContentType(srcMimeType), googleapi.ChunkSize(0)).
|
||||
Fields(partialFields).
|
||||
@@ -2325,6 +2484,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
err = updateMetadata(ctx, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.newObjectWithInfo(ctx, remote, info)
|
||||
}
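
The upload pacing referenced above is built on golang.org/x/time/rate. A minimal sketch of how a limiter configured with the new defaults (3 uploads per second, burst of 3) behaves; the loop is illustrative only:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Mirrors the uploadsPerSecond / uploadsPerSecondBurst defaults from the diff.
	limiter := rate.NewLimiter(rate.Limit(3.0), 3)

	start := time.Now()
	for i := 0; i < 6; i++ {
		_ = limiter.Wait(context.Background()) // blocks until an upload slot is free
		fmt.Printf("upload %d at %v\n", i, time.Since(start).Round(100*time.Millisecond))
	}
	// The first 3 iterations pass immediately (burst); the rest are spaced roughly 333ms apart.
}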
|
||||
|
||||
@@ -3242,7 +3405,7 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
|
||||
|
||||
// copy file with id to dest
|
||||
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
||||
info, err := f.getFile(ctx, id, f.fileFields)
|
||||
info, err := f.getFile(ctx, id, f.getFileFields(ctx))
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't find id: %w", err)
|
||||
}
|
||||
@@ -3922,10 +4085,20 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
MimeType: srcMimeType,
|
||||
ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
|
||||
}
|
||||
|
||||
updateMetadata, err := o.fs.fetchAndUpdateMetadata(ctx, src, options, updateInfo, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = updateMetadata(ctx, info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -4011,6 +4184,26 @@ func (o *baseObject) ParentID() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Metadata returns metadata for an object
|
||||
//
|
||||
// It should return nil if there is no Metadata
|
||||
func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
|
||||
if o.metadata != nil {
|
||||
return *o.metadata, nil
|
||||
}
|
||||
fs.Debugf(o, "Fetching metadata")
|
||||
id := actualID(o.id)
|
||||
info, err := o.fs.getFile(ctx, id, o.fs.getFileFields(ctx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = o.parseMetadata(ctx, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return *o.metadata, nil
|
||||
}
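
Callers outside the backend reach this method through the fs.Metadataer interface asserted later in the file. A hedged usage fragment; obj and ctx are assumed to come from the caller, and the error handling depends on the enclosing function:

// obj is assumed to be an fs.Object returned by a normal listing on this backend.
if do, ok := obj.(fs.Metadataer); ok {
	meta, err := do.Metadata(ctx)
	if err != nil {
		return err
	}
	fmt.Println(meta["content-type"], meta["mtime"]) // keys documented in metadata.go
}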
|
||||
|
||||
func (o *documentObject) ext() string {
|
||||
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
|
||||
}
|
||||
@@ -4072,6 +4265,7 @@ var (
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
_ fs.ParentIDer = (*Object)(nil)
|
||||
_ fs.Metadataer = (*Object)(nil)
|
||||
_ fs.Object = (*documentObject)(nil)
|
||||
_ fs.MimeTyper = (*documentObject)(nil)
|
||||
_ fs.IDer = (*documentObject)(nil)
|
||||
|
||||
608 backend/drive/metadata.go (new file)
@@ -0,0 +1,608 @@
|
||||
package drive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"golang.org/x/sync/errgroup"
|
||||
drive "google.golang.org/api/drive/v3"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// system metadata keys which this backend owns
|
||||
var systemMetadataInfo = map[string]fs.MetadataHelp{
|
||||
"content-type": {
|
||||
Help: "The MIME type of the file.",
|
||||
Type: "string",
|
||||
Example: "text/plain",
|
||||
},
|
||||
"mtime": {
|
||||
Help: "Time of last modification with mS accuracy.",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05.999Z07:00",
|
||||
},
|
||||
"btime": {
|
||||
Help: "Time of file birth (creation) with mS accuracy. Note that this is only writable on fresh uploads - it can't be written for updates.",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05.999Z07:00",
|
||||
},
|
||||
"copy-requires-writer-permission": {
|
||||
Help: "Whether the options to copy, print, or download this file, should be disabled for readers and commenters.",
|
||||
Type: "boolean",
|
||||
Example: "true",
|
||||
},
|
||||
"writers-can-share": {
|
||||
Help: "Whether users with only writer permission can modify the file's permissions. Not populated for items in shared drives.",
|
||||
Type: "boolean",
|
||||
Example: "false",
|
||||
},
|
||||
"viewed-by-me": {
|
||||
Help: "Whether the file has been viewed by this user.",
|
||||
Type: "boolean",
|
||||
Example: "true",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"owner": {
|
||||
Help: "The owner of the file. Usually an email address. Enable with --drive-metadata-owner.",
|
||||
Type: "string",
|
||||
Example: "user@example.com",
|
||||
},
|
||||
"permissions": {
|
||||
Help: "Permissions in a JSON dump of Google drive format. On shared drives these will only be present if they aren't inherited. Enable with --drive-metadata-permissions.",
|
||||
Type: "JSON",
|
||||
Example: "{}",
|
||||
},
|
||||
"folder-color-rgb": {
|
||||
Help: "The color for a folder or a shortcut to a folder as an RGB hex string.",
|
||||
Type: "string",
|
||||
Example: "881133",
|
||||
},
|
||||
"description": {
|
||||
Help: "A short description of the file.",
|
||||
Type: "string",
|
||||
Example: "Contract for signing",
|
||||
},
|
||||
"starred": {
|
||||
Help: "Whether the user has starred the file.",
|
||||
Type: "boolean",
|
||||
Example: "false",
|
||||
},
|
||||
"labels": {
|
||||
Help: "Labels attached to this file in a JSON dump of Googled drive format. Enable with --drive-metadata-labels.",
|
||||
Type: "JSON",
|
||||
Example: "[]",
|
||||
},
|
||||
}
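
To make the table above concrete, the metadata assembled by parseMetadata later in this file for a typical Drive object would resemble the map below; every value shown is illustrative:

example := fs.Metadata{
	"content-type":                    "text/plain",
	"mtime":                           "2006-01-02T15:04:05.999Z07:00",
	"btime":                           "2006-01-02T15:04:05.999Z07:00",
	"starred":                         "false",
	"viewed-by-me":                    "true",
	"copy-requires-writer-permission": "false",
	"writers-can-share":               "true",
	"owner":                           "user@example.com", // only populated with --drive-metadata-owner read
	"description":                     "Contract for signing",
}
_ = example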
|
||||
|
||||
// Extra fields we need to fetch to implement the system metadata above
|
||||
var metadataFields = googleapi.Field(strings.Join([]string{
|
||||
"copyRequiresWriterPermission",
|
||||
"description",
|
||||
"folderColorRgb",
|
||||
"hasAugmentedPermissions",
|
||||
"owners",
|
||||
"permissionIds",
|
||||
"permissions",
|
||||
"properties",
|
||||
"starred",
|
||||
"viewedByMe",
|
||||
"viewedByMeTime",
|
||||
"writersCanShare",
|
||||
}, ","))
|
||||
|
||||
// Fields we need to read from permissions
|
||||
var permissionsFields = googleapi.Field(strings.Join([]string{
|
||||
"*",
|
||||
"permissionDetails/*",
|
||||
}, ","))
|
||||
|
||||
// getPermission returns permissions for the fileID and permissionID passed in
|
||||
func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, useCache bool) (perm *drive.Permission, inherited bool, err error) {
|
||||
f.permissionsMu.Lock()
|
||||
defer f.permissionsMu.Unlock()
|
||||
if useCache {
|
||||
perm = f.permissions[permissionID]
|
||||
if perm != nil {
|
||||
return perm, false, nil
|
||||
}
|
||||
}
|
||||
fs.Debugf(f, "Fetching permission %q", permissionID)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
perm, err = f.svc.Permissions.Get(fileID, permissionID).
|
||||
Fields(permissionsFields).
|
||||
SupportsAllDrives(true).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
inherited = len(perm.PermissionDetails) > 0 && perm.PermissionDetails[0].Inherited
|
||||
|
||||
cleanPermission(perm)
|
||||
|
||||
// cache the permission
|
||||
f.permissions[permissionID] = perm
|
||||
|
||||
return perm, inherited, err
|
||||
}
|
||||
|
||||
// Set the permissions on the info
|
||||
func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) {
|
||||
for _, perm := range permissions {
|
||||
if perm.Role == "owner" {
|
||||
// ignore owner permissions - these are set with owner
|
||||
continue
|
||||
}
|
||||
cleanPermissionForWrite(perm)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Permissions.Create(info.Id, perm).
|
||||
SupportsAllDrives(true).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set permission: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clean attributes from permissions which we can't write
|
||||
func cleanPermissionForWrite(perm *drive.Permission) {
|
||||
perm.Deleted = false
|
||||
perm.DisplayName = ""
|
||||
perm.Id = ""
|
||||
perm.Kind = ""
|
||||
perm.PermissionDetails = nil
|
||||
perm.TeamDrivePermissionDetails = nil
|
||||
}
|
||||
|
||||
// Clean and cache the permission if not already cached
|
||||
func (f *Fs) cleanAndCachePermission(perm *drive.Permission) {
|
||||
f.permissionsMu.Lock()
|
||||
defer f.permissionsMu.Unlock()
|
||||
cleanPermission(perm)
|
||||
if _, found := f.permissions[perm.Id]; !found {
|
||||
f.permissions[perm.Id] = perm
|
||||
}
|
||||
}
|
||||
|
||||
// Clean fields we don't need to keep from the permission
|
||||
func cleanPermission(perm *drive.Permission) {
|
||||
// DisplayName: Output only. The "pretty" name of the value of the
|
||||
// permission. The following is a list of examples for each type of
|
||||
// permission: * `user` - User's full name, as defined for their Google
|
||||
// account, such as "Joe Smith." * `group` - Name of the Google Group,
|
||||
// such as "The Company Administrators." * `domain` - String domain
|
||||
// name, such as "thecompany.com." * `anyone` - No `displayName` is
|
||||
// present.
|
||||
perm.DisplayName = ""
|
||||
|
||||
// Kind: Output only. Identifies what kind of resource this is. Value:
|
||||
// the fixed string "drive#permission".
|
||||
perm.Kind = ""
|
||||
|
||||
// PermissionDetails: Output only. Details of whether the permissions on
|
||||
// this shared drive item are inherited or directly on this item. This
|
||||
// is an output-only field which is present only for shared drive items.
|
||||
perm.PermissionDetails = nil
|
||||
|
||||
// PhotoLink: Output only. A link to the user's profile photo, if
|
||||
// available.
|
||||
perm.PhotoLink = ""
|
||||
|
||||
// TeamDrivePermissionDetails: Output only. Deprecated: Output only. Use
|
||||
// `permissionDetails` instead.
|
||||
perm.TeamDrivePermissionDetails = nil
|
||||
}
|
||||
|
||||
// Fields we need to read from labels
|
||||
var labelsFields = googleapi.Field(strings.Join([]string{
|
||||
"*",
|
||||
}, ","))
|
||||
|
||||
// getLabels returns labels for the fileID passed in
|
||||
func (f *Fs) getLabels(ctx context.Context, fileID string) (labels []*drive.Label, err error) {
|
||||
fs.Debugf(f, "Fetching labels for %q", fileID)
|
||||
listLabels := f.svc.Files.ListLabels(fileID).
|
||||
Fields(labelsFields).
|
||||
Context(ctx)
|
||||
for {
|
||||
var info *drive.LabelList
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
info, err = listLabels.Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
labels = append(labels, info.Labels...)
|
||||
if info.NextPageToken == "" {
|
||||
break
|
||||
}
|
||||
listLabels.PageToken(info.NextPageToken)
|
||||
}
|
||||
for _, label := range labels {
|
||||
cleanLabel(label)
|
||||
}
|
||||
return labels, nil
|
||||
}
|
||||
|
||||
// Set the labels on the info
|
||||
func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.Label) (err error) {
|
||||
if len(labels) == 0 {
|
||||
return nil
|
||||
}
|
||||
req := drive.ModifyLabelsRequest{}
|
||||
for _, label := range labels {
|
||||
req.LabelModifications = append(req.LabelModifications, &drive.LabelModification{
|
||||
FieldModifications: labelFieldsToFieldModifications(label.Fields),
|
||||
LabelId: label.Id,
|
||||
})
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Files.ModifyLabels(info.Id, &req).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set owner: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert label fields into something which can set the fields
|
||||
func labelFieldsToFieldModifications(fields map[string]drive.LabelField) (out []*drive.LabelFieldModification) {
|
||||
for id, field := range fields {
|
||||
var emails []string
|
||||
for _, user := range field.User {
|
||||
emails = append(emails, user.EmailAddress)
|
||||
}
|
||||
out = append(out, &drive.LabelFieldModification{
|
||||
// FieldId: The ID of the field to be modified.
|
||||
FieldId: id,
|
||||
|
||||
// SetDateValues: Replaces the value of a dateString Field with these
|
||||
// new values. The string must be in the RFC 3339 full-date format:
|
||||
// YYYY-MM-DD.
|
||||
SetDateValues: field.DateString,
|
||||
|
||||
// SetIntegerValues: Replaces the value of an `integer` field with these
|
||||
// new values.
|
||||
SetIntegerValues: field.Integer,
|
||||
|
||||
// SetSelectionValues: Replaces a `selection` field with these new
|
||||
// values.
|
||||
SetSelectionValues: field.Selection,
|
||||
|
||||
// SetTextValues: Sets the value of a `text` field.
|
||||
SetTextValues: field.Text,
|
||||
|
||||
// SetUserValues: Replaces a `user` field with these new values. The
|
||||
// values must be valid email addresses.
|
||||
SetUserValues: emails,
|
||||
})
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Clean fields we don't need to keep from the label
|
||||
func cleanLabel(label *drive.Label) {
|
||||
// Kind: This is always drive#label
|
||||
label.Kind = ""
|
||||
|
||||
for name, field := range label.Fields {
|
||||
// Kind: This is always drive#labelField.
|
||||
field.Kind = ""
|
||||
|
||||
// Note the fields are copies so we need to write them
|
||||
// back to the map
|
||||
label.Fields[name] = field
|
||||
}
|
||||
}
|
||||
|
||||
// Parse the metadata from drive item
|
||||
//
|
||||
// It should return nil if there is no Metadata
|
||||
func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err error) {
|
||||
metadata := make(fs.Metadata, 16)
|
||||
|
||||
// Dump user metadata first as it overrides system metadata
|
||||
for k, v := range info.Properties {
|
||||
metadata[k] = v
|
||||
}
|
||||
|
||||
// System metadata
|
||||
	metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
	metadata["writers-can-share"] = fmt.Sprint(info.WritersCanShare)
	metadata["viewed-by-me"] = fmt.Sprint(info.ViewedByMe)
	metadata["content-type"] = info.MimeType

	// Owners: Output only. The owner of this file. Only certain legacy
	// files may have more than one owner. This field isn't populated for
	// items in shared drives.
	if o.fs.opt.MetadataOwner.IsSet(rwRead) && len(info.Owners) > 0 {
		user := info.Owners[0]
		if len(info.Owners) > 1 {
			fs.Logf(o, "Ignoring more than 1 owner")
		}
		if user != nil {
			id := user.EmailAddress
			if id == "" {
				id = user.DisplayName
			}
			metadata["owner"] = id
		}
	}

	if o.fs.opt.MetadataPermissions.IsSet(rwRead) {
		// We only write permissions out if they are not inherited.
		//
		// On My Drives permissions seem to be attached to every item
		// so they will always be written out.
		//
		// On Shared Drives only non-inherited permissions will be
		// written out.

		// To read the inherited permissions flag will mean we need to
		// read the permissions for each object and the cache will be
		// useless. However shared drives don't return permissions
		// only permissionIds so will need to fetch them for each
		// object. We use HasAugmentedPermissions to see if there are
		// special permissions before fetching them to save transactions.

		// HasAugmentedPermissions: Output only. Whether there are permissions
		// directly on this file. This field is only populated for items in
		// shared drives.
		if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
			// Don't process permissions if there aren't any specifically set
			info.Permissions = nil
			info.PermissionIds = nil
		}

		// PermissionIds: Output only. List of permission IDs for users with
		// access to this file.
		//
		// Only process these if we have no Permissions
		if len(info.PermissionIds) > 0 && len(info.Permissions) == 0 {
			info.Permissions = make([]*drive.Permission, 0, len(info.PermissionIds))
			g, gCtx := errgroup.WithContext(ctx)
			g.SetLimit(o.fs.ci.Checkers)
			var mu sync.Mutex // protect the info.Permissions from concurrent writes
			for _, permissionID := range info.PermissionIds {
				permissionID := permissionID
				g.Go(func() error {
					// must fetch the team drive ones individually to check the inherited flag
					perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
					if err != nil {
						return fmt.Errorf("failed to read permission: %w", err)
					}
					// Don't write inherited permissions out
					if inherited {
						return nil
					}
					// Don't write owner role out - these are covered by the owner metadata
					if perm.Role == "owner" {
						return nil
					}
					mu.Lock()
					info.Permissions = append(info.Permissions, perm)
					mu.Unlock()
					return nil
				})
			}
			err = g.Wait()
			if err != nil {
				return err
			}
		} else {
			// Clean the fetched permissions
			for _, perm := range info.Permissions {
				o.fs.cleanAndCachePermission(perm)
			}
		}

		// Permissions: Output only. The full list of permissions for the file.
		// This is only available if the requesting user can share the file. Not
		// populated for items in shared drives.
		if len(info.Permissions) > 0 {
			buf, err := json.Marshal(info.Permissions)
			if err != nil {
				return fmt.Errorf("failed to marshal permissions: %w", err)
			}
			metadata["permissions"] = string(buf)
		}

		// Permission propagation
		// https://developers.google.com/drive/api/guides/manage-sharing#permission-propagation
		// Leads me to believe that in non shared drives, permissions
		// are added to each item when you set permissions for a
		// folder whereas in shared drives they are inherited and
		// placed on the item directly.
	}

	if info.FolderColorRgb != "" {
		metadata["folder-color-rgb"] = info.FolderColorRgb
	}
	if info.Description != "" {
		metadata["description"] = info.Description
	}
	metadata["starred"] = fmt.Sprint(info.Starred)
	metadata["btime"] = info.CreatedTime
	metadata["mtime"] = info.ModifiedTime

	if o.fs.opt.MetadataLabels.IsSet(rwRead) {
		// FIXME would be really nice if we knew if files had labels
		// before listing but we need to know all possible label IDs
		// to get it in the listing.

		labels, err := o.fs.getLabels(ctx, actualID(info.Id))
		if err != nil {
			return fmt.Errorf("failed to fetch labels: %w", err)
		}
		buf, err := json.Marshal(labels)
		if err != nil {
			return fmt.Errorf("failed to marshal labels: %w", err)
		}
		metadata["labels"] = string(buf)
	}

	o.metadata = &metadata
	return nil
}

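For reference, the "permissions" value written above is simply the JSON encoding of the non-inherited drive.Permission objects collected earlier; a minimal sketch of what a value might look like (field values are illustrative only):

	// metadata["permissions"], illustrative:
	// [{"emailAddress":"user@example.com","role":"writer","type":"user"}]
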
// Set the owner on the info
func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err error) {
	perm := drive.Permission{
		Role:         "owner",
		EmailAddress: owner,
		// Type: The type of the grantee. Valid values are: * `user` * `group` *
		// `domain` * `anyone` When creating a permission, if `type` is `user`
		// or `group`, you must provide an `emailAddress` for the user or group.
		// When `type` is `domain`, you must provide a `domain`. There isn't
		// extra information required for an `anyone` type.
		Type: "user",
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Permissions.Create(info.Id, &perm).
			SupportsAllDrives(true).
			TransferOwnership(true).
			// SendNotificationEmail(false). - required apparently!
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return fmt.Errorf("failed to set owner: %w", err)
	}
	return nil
}

// Call back to set metadata that can't be set on the upload/update
//
// The *drive.File passed in holds the current state of the drive.File
// and this should update it with any modifications.
type updateMetadataFn func(context.Context, *drive.File) error

// read the metadata from meta and write it into updateInfo
//
// update should be true if this is being used to create metadata for
// an update/PATCH call as the rules on what can be updated are
// slightly different there.
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
	callbackFns := []updateMetadataFn{}
	callback = func(ctx context.Context, info *drive.File) error {
		for _, fn := range callbackFns {
			err := fn(ctx, info)
			if err != nil {
				return err
			}
		}
		return nil
	}
	// merge metadata into request and user metadata
	for k, v := range meta {
		k, v := k, v
		// parse a boolean from v and write into out
		parseBool := func(out *bool) error {
			b, err := strconv.ParseBool(v)
			if err != nil {
				return fmt.Errorf("can't parse metadata %q = %q: %w", k, v, err)
			}
			*out = b
			return nil
		}
		switch k {
		case "copy-requires-writer-permission":
			if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
				return nil, err
			}
		case "writers-can-share":
			if err := parseBool(&updateInfo.WritersCanShare); err != nil {
				return nil, err
			}
		case "viewed-by-me":
			// Can't write this
		case "content-type":
			updateInfo.MimeType = v
		case "owner":
			if !f.opt.MetadataOwner.IsSet(rwWrite) {
				continue
			}
			// Can't set Owner on upload so need to set afterwards
			callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
				return f.setOwner(ctx, info, v)
			})
		case "permissions":
			if !f.opt.MetadataPermissions.IsSet(rwWrite) {
				continue
			}
			var perms []*drive.Permission
			err := json.Unmarshal([]byte(v), &perms)
			if err != nil {
				return nil, fmt.Errorf("failed to unmarshal permissions: %w", err)
			}
			// Can't set Permissions on upload so need to set afterwards
			callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
				return f.setPermissions(ctx, info, perms)
			})
		case "labels":
			if !f.opt.MetadataLabels.IsSet(rwWrite) {
				continue
			}
			var labels []*drive.Label
			err := json.Unmarshal([]byte(v), &labels)
			if err != nil {
				return nil, fmt.Errorf("failed to unmarshal labels: %w", err)
			}
			// Can't set Labels on upload so need to set afterwards
			callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
				return f.setLabels(ctx, info, labels)
			})
		case "folder-color-rgb":
			updateInfo.FolderColorRgb = v
		case "description":
			updateInfo.Description = v
		case "starred":
			if err := parseBool(&updateInfo.Starred); err != nil {
				return nil, err
			}
		case "btime":
			if update {
				fs.Debugf(f, "Skipping btime metadata as can't update it on an existing file: %v", v)
			} else {
				updateInfo.CreatedTime = v
			}
		case "mtime":
			updateInfo.ModifiedTime = v
		default:
			if updateInfo.Properties == nil {
				updateInfo.Properties = make(map[string]string, 1)
			}
			updateInfo.Properties[k] = v
		}
	}
	return callback, nil
}

// Fetch metadata and update updateInfo if --metadata is in use
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *drive.File, update bool) (callback updateMetadataFn, err error) {
	meta, err := fs.GetMetadataOptions(ctx, f, src, options)
	if err != nil {
		return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
	}
	callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
	if err != nil {
		return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
	}
	return callback, nil
}
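A minimal sketch of how this is meant to be driven (variable names are illustrative; the flow mirrors fetchAndUpdateMetadata above): simple fields land on the *drive.File before the upload, while owner, permissions and labels are applied by the returned callback once the file exists.

	// Illustration only: apply a mix of pre-upload and deferred metadata.
	updateInfo := &drive.File{}
	meta := fs.Metadata{
		"content-type":  "text/plain",           // written straight onto updateInfo
		"description":   "example file",         // written straight onto updateInfo
		"owner":         "user@example.com",     // deferred to the callback via setOwner
		"my-custom-key": "stored as a property", // unknown keys land in updateInfo.Properties
	}
	callback, err := f.updateMetadata(ctx, updateInfo, meta, false)
	if err == nil {
		// ... upload the data using updateInfo, obtaining the new *drive.File in info ...
		err = callback(ctx, info)
	}
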
@@ -71,6 +71,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
	var res *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		_ = f.uploadsLimiter.Wait(ctx) // obey upslimit
		var body io.Reader
		body, err = googleapi.WithoutDataWrapper.JSONReader(info)
		if err != nil {
@@ -946,6 +946,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
	if root == "/" {
		return errors.New("can't remove root directory")
	}
	encRoot := f.opt.Enc.FromStandardPath(root)

	if check {
		// check directory exists
@@ -954,10 +955,9 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
			return fmt.Errorf("Rmdir: %w", err)
		}

		root = f.opt.Enc.FromStandardPath(root)
		// check directory empty
		arg := files.ListFolderArg{
			Path:      root,
			Path:      encRoot,
			Recursive: false,
		}
		if root == "/" {
@@ -978,7 +978,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)

	// remove it
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: encRoot})
		return shouldRetry(ctx, err)
	})
	return err
@@ -1231,18 +1231,21 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
		return nil, err
	}
	var total uint64
	var used = q.Used
	if q.Allocation != nil {
		if q.Allocation.Individual != nil {
			total += q.Allocation.Individual.Allocated
		}
		if q.Allocation.Team != nil {
			total += q.Allocation.Team.Allocated
			// Override used with Team.Used as this includes q.Used already
			used = q.Allocation.Team.Used
		}
	}
	usage = &fs.Usage{
		Total: fs.NewUsageValue(int64(total)),          // quota of bytes that can be used
		Used:  fs.NewUsageValue(int64(q.Used)),         // bytes in use
		Free:  fs.NewUsageValue(int64(total - q.Used)), // bytes which can be uploaded before reaching the quota
		Total: fs.NewUsageValue(int64(total)),          // quota of bytes that can be used
		Used:  fs.NewUsageValue(int64(used)),           // bytes in use
		Free:  fs.NewUsageValue(int64(total - used)),   // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}
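A worked example of the arithmetic above, with hypothetical figures: Team.Used already includes the member's own q.Used, so it replaces it rather than being added on top.

	// Illustration only:
	// q.Used = 10 GiB, Team.Allocated = 100 GiB, Team.Used = 60 GiB
	// => total = 100 GiB, used = 60 GiB (not 70 GiB), Free = 40 GiB
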
@@ -1310,10 +1310,11 @@ func (o *Object) Storable() bool {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	url := o.url
	if o.fs.opt.UserProject != "" {
		o.url = o.url + "&userProject=" + o.fs.opt.UserProject
		url += "&userProject=" + o.fs.opt.UserProject
	}
	req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return nil, err
	}

@@ -1143,6 +1143,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
			info = results[0]
		}
	}
	if err != nil {
		return fmt.Errorf("failed to commit batch: %w", err)
	}

	o.setMetaData(info)

@@ -80,6 +80,14 @@ func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
		}
		root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
	}
	if f.db == nil {
		if f.opt.MaxAge == 0 {
			fs.Errorf(f, "db not found. (disabled with max_age = 0)")
		} else {
			fs.Errorf(f, "db not found.")
		}
		return kv.ErrInactive
	}
	op := &kvDump{
		full: full,
		root: root,
@@ -114,6 +114,13 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
		root: rpath,
		opt:  opt,
	}
	// Correct root if definitely pointing to a file
	if err == fs.ErrorIsFile {
		f.root = path.Dir(f.root)
		if f.root == "." || f.root == "/" {
			f.root = ""
		}
	}
	baseFeatures := baseFs.Features()
	f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported

@@ -411,7 +418,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// Shutdown the backend, closing any background tasks and any cached connections.
func (f *Fs) Shutdown(ctx context.Context) (err error) {
	err = f.db.Stop(false)
	if f.db != nil {
		err = f.db.Stop(false)
	}
	if do := f.Fs.Features().Shutdown; do != nil {
		if err2 := do(ctx); err2 != nil {
			err = err2
@@ -60,9 +60,11 @@ func (f *Fs) testUploadFromCrypt(t *testing.T) {
	assert.NotNil(t, dst)

	// check that hash was created
	hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
	assert.NoError(t, err)
	assert.NotEmpty(t, hash)
	if f.opt.MaxAge > 0 {
		hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
		assert.NoError(t, err)
		assert.NotEmpty(t, hash)
	}
	//t.Logf("hash is %q", hash)
	_ = operations.Purge(ctx, f, dirName)
}

@@ -37,4 +37,9 @@ func TestIntegration(t *testing.T) {
		opt.QuickTestOK = true
	}
	fstests.Run(t, &opt)
	// test again with MaxAge = 0
	if *fstest.RemoteName == "" {
		opt.ExtraConfig = append(opt.ExtraConfig, fstests.ExtraConfigItem{Name: "TestHasher", Key: "max_age", Value: "0"})
		fstests.Run(t, &opt)
	}
}

@@ -93,7 +93,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	}

	options := hdfs.ClientOptions{
		Addresses:           []string{opt.Namenode},
		Addresses:           opt.Namenode,
		UseDatanodeHostname: false,
	}

@@ -20,9 +20,10 @@ func init() {
		NewFs: NewFs,
		Options: []fs.Option{{
			Name:      "namenode",
			Help:      "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
			Help:      "Hadoop name nodes and ports.\n\nE.g. \"namenode-1:8020,namenode-2:8020,...\" to connect to host namenodes at port 8020.",
			Required:  true,
			Sensitive: true,
			Default:   fs.CommaSepList{},
		}, {
			Name: "username",
			Help: "Hadoop user name.",
@@ -65,7 +66,7 @@ and 'privacy'. Used only with KERBEROS enabled.`,

// Options for this backend
type Options struct {
	Namenode               string          `config:"namenode"`
	Namenode               fs.CommaSepList `config:"namenode"`
	Username               string          `config:"username"`
	ServicePrincipalName   string          `config:"service_principal_name"`
	DataTransferProtection string          `config:"data_transfer_protection"`

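Since namenode is now an fs.CommaSepList, a value such as "namenode-1:8020,namenode-2:8020" is split on commas by the config layer and handed to the HDFS client as the full address list, so more than one namenode can be tried. A minimal sketch of the flow, with illustrative host names:

	// Illustration only: opt.Namenode parsed from "namenode-1:8020,namenode-2:8020"
	// behaves as []string{"namenode-1:8020", "namenode-2:8020"}.
	options := hdfs.ClientOptions{
		Addresses:           opt.Namenode, // every listed namenode is a candidate
		UseDatanodeHostname: false,
	}
	// options are then handed to the hdfs client constructor as in NewFs above.
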
@@ -762,6 +762,12 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
	return nil
}

// Shutdown shutdown the fs
func (f *Fs) Shutdown(ctx context.Context) error {
	f.tokenRenewer.Shutdown()
	return nil
}

// ------------------------------------------------------------

// Fs returns the parent Fs.
@@ -997,6 +1003,7 @@ var (
	_ fs.Copier     = (*Fs)(nil)
	_ fs.Mover      = (*Fs)(nil)
	_ fs.DirMover   = (*Fs)(nil)
	_ fs.Shutdowner = (*Fs)(nil)
	_ fs.Object     = (*Object)(nil)
	_ fs.IDer       = (*Object)(nil)
)

@@ -36,6 +36,7 @@ func init() {
		Name:        "http",
		Description: "HTTP",
		NewFs:       NewFs,
		CommandHelp: commandHelp,
		Options: []fs.Option{{
			Name: "url",
			Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
@@ -210,6 +211,42 @@ func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Op
	return createFileResult()
}

// Make the http connection with opt
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
	if len(opt.Headers)%2 != 0 {
		return false, errors.New("odd number of headers supplied")
	}

	if !strings.HasSuffix(opt.Endpoint, "/") {
		opt.Endpoint += "/"
	}

	// Parse the endpoint and stick the root onto it
	base, err := url.Parse(opt.Endpoint)
	if err != nil {
		return false, err
	}
	u, err := rest.URLJoin(base, rest.URLPathEscape(f.root))
	if err != nil {
		return false, err
	}

	client := fshttp.NewClient(ctx)

	endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
	fs.Debugf(nil, "Root: %s", endpoint)
	u, err = url.Parse(endpoint)
	if err != nil {
		return false, err
	}

	// Update f with the new parameters
	f.httpClient = client
	f.endpoint = u
	f.endpointURL = u.String()
	return isFile, nil
}

// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -220,47 +257,23 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		return nil, err
	}

	if len(opt.Headers)%2 != 0 {
		return nil, errors.New("odd number of headers supplied")
	}

	if !strings.HasSuffix(opt.Endpoint, "/") {
		opt.Endpoint += "/"
	}

	// Parse the endpoint and stick the root onto it
	base, err := url.Parse(opt.Endpoint)
	if err != nil {
		return nil, err
	}
	u, err := rest.URLJoin(base, rest.URLPathEscape(root))
	if err != nil {
		return nil, err
	}

	client := fshttp.NewClient(ctx)

	endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
	fs.Debugf(nil, "Root: %s", endpoint)
	u, err = url.Parse(endpoint)
	if err != nil {
		return nil, err
	}

	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:        name,
		root:        root,
		opt:         *opt,
		ci:          ci,
		httpClient:  client,
		endpoint:    u,
		endpointURL: u.String(),
		name: name,
		root: root,
		opt:  *opt,
		ci:   ci,
	}
	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)

	// Make the http connection
	isFile, err := f.httpConnection(ctx, opt)
	if err != nil {
		return nil, err
	}

	if isFile {
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
@@ -685,10 +698,66 @@ func (o *Object) MimeType(ctx context.Context) string {
	return o.contentType
}

var commandHelp = []fs.CommandHelp{{
	Name:  "set",
	Short: "Set command for updating the config parameters.",
	Long: `This set command can be used to update the config parameters
for a running http backend.

Usage Examples:

    rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
    rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
    rclone rc backend/command command=set fs=remote: -o url=https://example.com

The option keys are named as they are in the config file.

This rebuilds the connection to the http backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.

It doesn't return anything.
`,
}}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
	switch name {
	case "set":
		newOpt := f.opt
		err := configstruct.Set(configmap.Simple(opt), &newOpt)
		if err != nil {
			return nil, fmt.Errorf("reading config: %w", err)
		}
		_, err = f.httpConnection(ctx, &newOpt)
		if err != nil {
			return nil, fmt.Errorf("updating session: %w", err)
		}
		f.opt = newOpt
		keys := []string{}
		for k := range opt {
			keys = append(keys, k)
		}
		fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
		return nil, nil
	default:
		return nil, fs.ErrorCommandNotFound
	}
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
	_ fs.Commander   = &Fs{}
)

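As a usage note, the "set" command above can be driven from the CLI (as the help text shows) or programmatically through the fs.Commander interface; a minimal sketch, with a hypothetical replacement URL:

	// Illustration only: update the url option of a running http backend.
	// f is the *Fs returned by NewFs; the option keys match the config file names.
	if _, err := f.Command(ctx, "set", nil, map[string]string{
		"url": "https://example.com/new-base/",
	}); err != nil {
		fs.Errorf(f, "set failed, old options stay in effect: %v", err)
	}
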
66	backend/imagekit/client/client.go	Normal file
@@ -0,0 +1,66 @@
|
||||
// Package client provides a client for interacting with the ImageKit API.
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// ImageKit main struct
|
||||
type ImageKit struct {
|
||||
Prefix string
|
||||
UploadPrefix string
|
||||
Timeout int64
|
||||
UploadTimeout int64
|
||||
PrivateKey string
|
||||
PublicKey string
|
||||
URLEndpoint string
|
||||
HTTPClient *rest.Client
|
||||
}
|
||||
|
||||
// NewParams is a struct to define parameters to imagekit
|
||||
type NewParams struct {
|
||||
PrivateKey string
|
||||
PublicKey string
|
||||
URLEndpoint string
|
||||
}
|
||||
|
||||
// New returns ImageKit object from environment variables
|
||||
func New(ctx context.Context, params NewParams) (*ImageKit, error) {
|
||||
|
||||
privateKey := params.PrivateKey
|
||||
publicKey := params.PublicKey
|
||||
endpointURL := params.URLEndpoint
|
||||
|
||||
switch {
|
||||
case privateKey == "":
|
||||
return nil, fmt.Errorf("ImageKit.io URL endpoint is required")
|
||||
case publicKey == "":
|
||||
return nil, fmt.Errorf("ImageKit.io public key is required")
|
||||
case endpointURL == "":
|
||||
return nil, fmt.Errorf("ImageKit.io private key is required")
|
||||
}
|
||||
|
||||
cliCtx, cliCfg := fs.AddConfig(ctx)
|
||||
|
||||
cliCfg.UserAgent = "rclone/imagekit"
|
||||
client := rest.NewClient(fshttp.NewClient(cliCtx))
|
||||
|
||||
client.SetUserPass(privateKey, "")
|
||||
client.SetHeader("Accept", "application/json")
|
||||
|
||||
return &ImageKit{
|
||||
Prefix: "https://api.imagekit.io/v2",
|
||||
UploadPrefix: "https://upload.imagekit.io/api/v2",
|
||||
Timeout: 60,
|
||||
UploadTimeout: 3600,
|
||||
PrivateKey: params.PrivateKey,
|
||||
PublicKey: params.PublicKey,
|
||||
URLEndpoint: params.URLEndpoint,
|
||||
HTTPClient: client,
|
||||
}, nil
|
||||
}
|
||||
252	backend/imagekit/client/media.go	Normal file
@@ -0,0 +1,252 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"gopkg.in/validator.v2"
|
||||
)
|
||||
|
||||
// FilesOrFolderParam struct is a parameter type to ListFiles() function to search / list media library files.
|
||||
type FilesOrFolderParam struct {
|
||||
Path string `json:"path,omitempty"`
|
||||
Limit int `json:"limit,omitempty"`
|
||||
Skip int `json:"skip,omitempty"`
|
||||
SearchQuery string `json:"searchQuery,omitempty"`
|
||||
}
|
||||
|
||||
// AITag represents an AI tag for a media library file.
|
||||
type AITag struct {
|
||||
Name string `json:"name"`
|
||||
Confidence float32 `json:"confidence"`
|
||||
Source string `json:"source"`
|
||||
}
|
||||
|
||||
// File represents media library File details.
|
||||
type File struct {
|
||||
FileID string `json:"fileId"`
|
||||
Name string `json:"name"`
|
||||
FilePath string `json:"filePath"`
|
||||
Type string `json:"type"`
|
||||
VersionInfo map[string]string `json:"versionInfo"`
|
||||
IsPrivateFile *bool `json:"isPrivateFile"`
|
||||
CustomCoordinates *string `json:"customCoordinates"`
|
||||
URL string `json:"url"`
|
||||
Thumbnail string `json:"thumbnail"`
|
||||
FileType string `json:"fileType"`
|
||||
Mime string `json:"mime"`
|
||||
Height int `json:"height"`
|
||||
Width int `json:"Width"`
|
||||
Size uint64 `json:"size"`
|
||||
HasAlpha bool `json:"hasAlpha"`
|
||||
CustomMetadata map[string]any `json:"customMetadata,omitempty"`
|
||||
EmbeddedMetadata map[string]any `json:"embeddedMetadata"`
|
||||
CreatedAt time.Time `json:"createdAt"`
|
||||
UpdatedAt time.Time `json:"updatedAt"`
|
||||
Tags []string `json:"tags"`
|
||||
AITags []AITag `json:"AITags"`
|
||||
}
|
||||
|
||||
// Folder represents media library Folder details.
|
||||
type Folder struct {
|
||||
*File
|
||||
FolderPath string `json:"folderPath"`
|
||||
}
|
||||
|
||||
// CreateFolderParam represents parameter to create folder api
|
||||
type CreateFolderParam struct {
|
||||
FolderName string `validate:"nonzero" json:"folderName"`
|
||||
ParentFolderPath string `validate:"nonzero" json:"parentFolderPath"`
|
||||
}
|
||||
|
||||
// DeleteFolderParam represents parameter to delete folder api
|
||||
type DeleteFolderParam struct {
|
||||
FolderPath string `validate:"nonzero" json:"folderPath"`
|
||||
}
|
||||
|
||||
// MoveFolderParam represents parameter to move folder api
|
||||
type MoveFolderParam struct {
|
||||
SourceFolderPath string `validate:"nonzero" json:"sourceFolderPath"`
|
||||
DestinationPath string `validate:"nonzero" json:"destinationPath"`
|
||||
}
|
||||
|
||||
// JobIDResponse respresents response struct with JobID for folder operations
|
||||
type JobIDResponse struct {
|
||||
JobID string `json:"jobId"`
|
||||
}
|
||||
|
||||
// JobStatus represents response Data to job status api
|
||||
type JobStatus struct {
|
||||
JobID string `json:"jobId"`
|
||||
Type string `json:"type"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
// File represents media library File details.
|
||||
func (ik *ImageKit) File(ctx context.Context, fileID string) (*http.Response, *File, error) {
|
||||
data := &File{}
|
||||
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "GET",
|
||||
Path: fmt.Sprintf("/files/%s/details", fileID),
|
||||
RootURL: ik.Prefix,
|
||||
IgnoreStatus: true,
|
||||
}, nil, data)
|
||||
|
||||
return response, data, err
|
||||
}
|
||||
|
||||
// Files retrieves media library files. Filter options can be supplied as FilesOrFolderParam.
|
||||
func (ik *ImageKit) Files(ctx context.Context, params FilesOrFolderParam, includeVersion bool) (*http.Response, *[]File, error) {
|
||||
var SearchQuery = `type = "file"`
|
||||
|
||||
if includeVersion {
|
||||
SearchQuery = `type IN ["file", "file-version"]`
|
||||
}
|
||||
if params.SearchQuery != "" {
|
||||
SearchQuery = params.SearchQuery
|
||||
}
|
||||
|
||||
parameters := url.Values{}
|
||||
|
||||
parameters.Set("skip", fmt.Sprintf("%d", params.Skip))
|
||||
parameters.Set("limit", fmt.Sprintf("%d", params.Limit))
|
||||
parameters.Set("path", params.Path)
|
||||
parameters.Set("searchQuery", SearchQuery)
|
||||
|
||||
data := &[]File{}
|
||||
|
||||
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/files",
|
||||
RootURL: ik.Prefix,
|
||||
Parameters: parameters,
|
||||
}, nil, data)
|
||||
|
||||
return response, data, err
|
||||
}
|
||||
|
||||
// DeleteFile removes file by FileID from media library
|
||||
func (ik *ImageKit) DeleteFile(ctx context.Context, fileID string) (*http.Response, error) {
|
||||
var err error
|
||||
|
||||
if fileID == "" {
|
||||
return nil, errors.New("fileID can not be empty")
|
||||
}
|
||||
|
||||
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: fmt.Sprintf("/files/%s", fileID),
|
||||
RootURL: ik.Prefix,
|
||||
NoResponse: true,
|
||||
}, nil, nil)
|
||||
|
||||
return response, err
|
||||
}
|
||||
|
||||
// Folders retrieves media library files. Filter options can be supplied as FilesOrFolderParam.
|
||||
func (ik *ImageKit) Folders(ctx context.Context, params FilesOrFolderParam) (*http.Response, *[]Folder, error) {
|
||||
var SearchQuery = `type = "folder"`
|
||||
|
||||
if params.SearchQuery != "" {
|
||||
SearchQuery = params.SearchQuery
|
||||
}
|
||||
|
||||
parameters := url.Values{}
|
||||
|
||||
parameters.Set("skip", fmt.Sprintf("%d", params.Skip))
|
||||
parameters.Set("limit", fmt.Sprintf("%d", params.Limit))
|
||||
parameters.Set("path", params.Path)
|
||||
parameters.Set("searchQuery", SearchQuery)
|
||||
|
||||
data := &[]Folder{}
|
||||
|
||||
resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/files",
|
||||
RootURL: ik.Prefix,
|
||||
Parameters: parameters,
|
||||
}, nil, data)
|
||||
|
||||
if err != nil {
|
||||
return resp, data, err
|
||||
}
|
||||
|
||||
return resp, data, err
|
||||
}
|
||||
|
||||
// CreateFolder creates a new folder in media library
|
||||
func (ik *ImageKit) CreateFolder(ctx context.Context, param CreateFolderParam) (*http.Response, error) {
|
||||
var err error
|
||||
|
||||
if err = validator.Validate(¶m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/folder",
|
||||
RootURL: ik.Prefix,
|
||||
NoResponse: true,
|
||||
}, param, nil)
|
||||
|
||||
return response, err
|
||||
}
|
||||
|
||||
// DeleteFolder removes the folder from media library
|
||||
func (ik *ImageKit) DeleteFolder(ctx context.Context, param DeleteFolderParam) (*http.Response, error) {
|
||||
var err error
|
||||
|
||||
if err = validator.Validate(¶m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
response, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: "/folder",
|
||||
RootURL: ik.Prefix,
|
||||
NoResponse: true,
|
||||
}, param, nil)
|
||||
|
||||
return response, err
|
||||
}
|
||||
|
||||
// MoveFolder moves given folder to new path in media library
|
||||
func (ik *ImageKit) MoveFolder(ctx context.Context, param MoveFolderParam) (*http.Response, *JobIDResponse, error) {
|
||||
var err error
|
||||
var response = &JobIDResponse{}
|
||||
|
||||
if err = validator.Validate(¶m); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "bulkJobs/moveFolder",
|
||||
RootURL: ik.Prefix,
|
||||
}, param, response)
|
||||
|
||||
return resp, response, err
|
||||
}
|
||||
|
||||
// BulkJobStatus retrieves the status of a bulk job by job ID.
|
||||
func (ik *ImageKit) BulkJobStatus(ctx context.Context, jobID string) (*http.Response, *JobStatus, error) {
|
||||
var err error
|
||||
var response = &JobStatus{}
|
||||
|
||||
if jobID == "" {
|
||||
return nil, nil, errors.New("jobId can not be blank")
|
||||
}
|
||||
|
||||
resp, err := ik.HTTPClient.CallJSON(ctx, &rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "bulkJobs/" + jobID,
|
||||
RootURL: ik.Prefix,
|
||||
}, nil, response)
|
||||
|
||||
return resp, response, err
|
||||
}
|
||||
96	backend/imagekit/client/upload.go	Normal file
@@ -0,0 +1,96 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// UploadParam defines upload parameters
|
||||
type UploadParam struct {
|
||||
FileName string `json:"fileName"`
|
||||
Folder string `json:"folder,omitempty"` // default value: /
|
||||
Tags string `json:"tags,omitempty"`
|
||||
IsPrivateFile *bool `json:"isPrivateFile,omitempty"` // default: false
|
||||
}
|
||||
|
||||
// UploadResult defines the response structure for the upload API
|
||||
type UploadResult struct {
|
||||
FileID string `json:"fileId"`
|
||||
Name string `json:"name"`
|
||||
URL string `json:"url"`
|
||||
ThumbnailURL string `json:"thumbnailUrl"`
|
||||
Height int `json:"height"`
|
||||
Width int `json:"Width"`
|
||||
Size uint64 `json:"size"`
|
||||
FilePath string `json:"filePath"`
|
||||
AITags []map[string]any `json:"AITags"`
|
||||
VersionInfo map[string]string `json:"versionInfo"`
|
||||
}
|
||||
|
||||
// Upload uploads an asset to a imagekit account.
|
||||
//
|
||||
// The asset can be:
|
||||
// - the actual data (io.Reader)
|
||||
// - the Data URI (Base64 encoded), max ~60 MB (62,910,000 chars)
|
||||
// - the remote FTP, HTTP or HTTPS URL address of an existing file
|
||||
//
|
||||
// https://docs.imagekit.io/api-reference/upload-file-api/server-side-file-upload
|
||||
func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadParam) (*http.Response, *UploadResult, error) {
|
||||
var err error
|
||||
|
||||
if param.FileName == "" {
|
||||
return nil, nil, errors.New("Upload: Filename is required")
|
||||
}
|
||||
|
||||
// Initialize URL values
|
||||
formParams := url.Values{}
|
||||
|
||||
formParams.Add("useUniqueFileName", fmt.Sprint(false))
|
||||
|
||||
// Add individual fields to URL values
|
||||
if param.FileName != "" {
|
||||
formParams.Add("fileName", param.FileName)
|
||||
}
|
||||
|
||||
if param.Tags != "" {
|
||||
formParams.Add("tags", param.Tags)
|
||||
}
|
||||
|
||||
if param.Folder != "" {
|
||||
formParams.Add("folder", param.Folder)
|
||||
}
|
||||
|
||||
if param.IsPrivateFile != nil {
|
||||
formParams.Add("isPrivateFile", fmt.Sprintf("%v", *param.IsPrivateFile))
|
||||
}
|
||||
|
||||
response := &UploadResult{}
|
||||
|
||||
formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err)
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/files/upload",
|
||||
RootURL: ik.UploadPrefix,
|
||||
Body: formReader,
|
||||
ContentType: contentType,
|
||||
}
|
||||
|
||||
resp, err := ik.HTTPClient.CallJSON(ctx, &opts, nil, response)
|
||||
|
||||
if err != nil {
|
||||
return resp, response, err
|
||||
}
|
||||
|
||||
return resp, response, err
|
||||
}
|
||||
72	backend/imagekit/client/url.go	Normal file
@@ -0,0 +1,72 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
neturl "net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// URLParam represents parameters for generating url
|
||||
type URLParam struct {
|
||||
Path string
|
||||
Src string
|
||||
URLEndpoint string
|
||||
Signed bool
|
||||
ExpireSeconds int64
|
||||
QueryParameters map[string]string
|
||||
}
|
||||
|
||||
// URL generates url from URLParam
|
||||
func (ik *ImageKit) URL(params URLParam) (string, error) {
|
||||
var resultURL string
|
||||
var url *neturl.URL
|
||||
var err error
|
||||
var endpoint = params.URLEndpoint
|
||||
|
||||
if endpoint == "" {
|
||||
endpoint = ik.URLEndpoint
|
||||
}
|
||||
|
||||
endpoint = strings.TrimRight(endpoint, "/") + "/"
|
||||
|
||||
if params.QueryParameters == nil {
|
||||
params.QueryParameters = make(map[string]string)
|
||||
}
|
||||
|
||||
if url, err = neturl.Parse(params.Src); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
query := url.Query()
|
||||
|
||||
for k, v := range params.QueryParameters {
|
||||
query.Set(k, v)
|
||||
}
|
||||
url.RawQuery = query.Encode()
|
||||
resultURL = url.String()
|
||||
|
||||
if params.Signed {
|
||||
now := time.Now().Unix()
|
||||
|
||||
var expires = strconv.FormatInt(now+params.ExpireSeconds, 10)
|
||||
var path = strings.Replace(resultURL, endpoint, "", 1)
|
||||
|
||||
path = path + expires
|
||||
mac := hmac.New(sha1.New, []byte(ik.PrivateKey))
|
||||
mac.Write([]byte(path))
|
||||
signature := hex.EncodeToString(mac.Sum(nil))
|
||||
|
||||
if strings.Contains(resultURL, "?") {
|
||||
resultURL = resultURL + "&" + fmt.Sprintf("ik-t=%s&ik-s=%s", expires, signature)
|
||||
} else {
|
||||
resultURL = resultURL + "?" + fmt.Sprintf("ik-t=%s&ik-s=%s", expires, signature)
|
||||
}
|
||||
}
|
||||
|
||||
return resultURL, nil
|
||||
}
|
||||
828	backend/imagekit/imagekit.go	Normal file
@@ -0,0 +1,828 @@
|
||||
// Package imagekit provides an interface to the ImageKit.io media library.
|
||||
package imagekit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net/http"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/imagekit/client"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/version"
|
||||
)
|
||||
|
||||
const (
|
||||
minSleep = 1 * time.Millisecond
|
||||
maxSleep = 100 * time.Millisecond
|
||||
decayConstant = 2
|
||||
)
|
||||
|
||||
var systemMetadataInfo = map[string]fs.MetadataHelp{
|
||||
"btime": {
|
||||
Help: "Time of file birth (creation) read from Last-Modified header",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05.999999999Z07:00",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"size": {
|
||||
Help: "Size of the object in bytes",
|
||||
Type: "int64",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"file-type": {
|
||||
Help: "Type of the file",
|
||||
Type: "string",
|
||||
Example: "image",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"height": {
|
||||
Help: "Height of the image or video in pixels",
|
||||
Type: "int",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"width": {
|
||||
Help: "Width of the image or video in pixels",
|
||||
Type: "int",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"has-alpha": {
|
||||
Help: "Whether the image has alpha channel or not",
|
||||
Type: "bool",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"tags": {
|
||||
Help: "Tags associated with the file",
|
||||
Type: "string",
|
||||
Example: "tag1,tag2",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"google-tags": {
|
||||
Help: "AI generated tags by Google Cloud Vision associated with the image",
|
||||
Type: "string",
|
||||
Example: "tag1,tag2",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"aws-tags": {
|
||||
Help: "AI generated tags by AWS Rekognition associated with the image",
|
||||
Type: "string",
|
||||
Example: "tag1,tag2",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"is-private-file": {
|
||||
Help: "Whether the file is private or not",
|
||||
Type: "bool",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"custom-coordinates": {
|
||||
Help: "Custom coordinates of the file",
|
||||
Type: "string",
|
||||
Example: "0,0,100,100",
|
||||
ReadOnly: true,
|
||||
},
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "imagekit",
|
||||
Description: "ImageKit.io",
|
||||
NewFs: NewFs,
|
||||
MetadataInfo: &fs.MetadataInfo{
|
||||
System: systemMetadataInfo,
|
||||
Help: `Any metadata supported by the underlying remote is read and written.`,
|
||||
},
|
||||
Options: []fs.Option{
|
||||
{
|
||||
Name: "endpoint",
|
||||
Help: "You can find your ImageKit.io URL endpoint in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)",
|
||||
Required: true,
|
||||
},
|
||||
{
|
||||
Name: "public_key",
|
||||
Help: "You can find your ImageKit.io public key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "private_key",
|
||||
Help: "You can find your ImageKit.io private key in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "only_signed",
|
||||
Help: "If you have configured `Restrict unsigned image URLs` in your dashboard settings, set this to true.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "versions",
|
||||
Help: "Include old versions in directory listings.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "upload_tags",
|
||||
Help: "Tags to add to the uploaded files, e.g. \"tag1,tag2\".",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: (encoder.EncodeZero |
|
||||
encoder.EncodeSlash |
|
||||
encoder.EncodeQuestion |
|
||||
encoder.EncodeHashPercent |
|
||||
encoder.EncodeCtl |
|
||||
encoder.EncodeDel |
|
||||
encoder.EncodeDot |
|
||||
encoder.EncodeDoubleQuote |
|
||||
encoder.EncodePercent |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeDollar |
|
||||
encoder.EncodeLtGt |
|
||||
encoder.EncodeSquareBracket |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Endpoint string `config:"endpoint"`
|
||||
PublicKey string `config:"public_key"`
|
||||
PrivateKey string `config:"private_key"`
|
||||
OnlySigned bool `config:"only_signed"`
|
||||
Versions bool `config:"versions"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs represents a remote to ImageKit
|
||||
type Fs struct {
|
||||
name string // name of remote
|
||||
root string // root path
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
ik *client.ImageKit // ImageKit client
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
}
|
||||
|
||||
// Object describes a ImageKit file
|
||||
type Object struct {
|
||||
fs *Fs // The Fs this object is part of
|
||||
remote string // The remote path
|
||||
filePath string // The path to the file
|
||||
contentType string // The content type of the object if known - may be ""
|
||||
timestamp time.Time // The timestamp of the object if known - may be zero
|
||||
file client.File // The media file if known - may be nil
|
||||
versionID string // If present this points to an object version
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ik, err := client.New(ctx, client.NewParams{
|
||||
URLEndpoint: opt.Endpoint,
|
||||
PublicKey: opt.PublicKey,
|
||||
PrivateKey: opt.PrivateKey,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
ik: ik,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
|
||||
f.root = path.Join("/", root)
|
||||
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: false,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: false,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: false,
|
||||
ServerSideAcrossConfigs: false,
|
||||
IsLocal: false,
|
||||
SlowHash: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: false,
|
||||
UserMetadata: false,
|
||||
FilterAware: true,
|
||||
PartialUploads: false,
|
||||
NoMultiThreading: false,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
if f.root != "/" {
|
||||
|
||||
r := f.root
|
||||
|
||||
folderPath := f.EncodePath(r[:strings.LastIndex(r, "/")+1])
|
||||
fileName := f.EncodeFileName(r[strings.LastIndex(r, "/")+1:])
|
||||
|
||||
file := f.getFileByName(ctx, folderPath, fileName)
|
||||
|
||||
if file != nil {
|
||||
newRoot := path.Dir(f.root)
|
||||
f.root = newRoot
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return strings.TrimLeft(f.root, "/")
|
||||
}
|
||||
|
||||
// String returns a description of the FS
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("FS imagekit: %s", f.root)
|
||||
}
|
||||
|
||||
// Precision of the ModTimes in this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return fs.ModTimeNotSupported
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash types of the filesystem.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.NewHashSet()
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs.
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
|
||||
remote := path.Join(f.root, dir)
|
||||
|
||||
remote = f.EncodePath(remote)
|
||||
|
||||
if remote != "/" {
|
||||
parentFolderPath, folderName := path.Split(remote)
|
||||
folderExists, err := f.getFolderByName(ctx, parentFolderPath, folderName)
|
||||
|
||||
if err != nil {
|
||||
return make(fs.DirEntries, 0), err
|
||||
}
|
||||
|
||||
if folderExists == nil {
|
||||
return make(fs.DirEntries, 0), fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
|
||||
folders, folderError := f.getFolders(ctx, remote)
|
||||
|
||||
if folderError != nil {
|
||||
return make(fs.DirEntries, 0), folderError
|
||||
}
|
||||
|
||||
files, fileError := f.getFiles(ctx, remote, f.opt.Versions)
|
||||
|
||||
if fileError != nil {
|
||||
return make(fs.DirEntries, 0), fileError
|
||||
}
|
||||
|
||||
res := make([]fs.DirEntry, 0, len(folders)+len(files))
|
||||
|
||||
for _, folder := range folders {
|
||||
folderPath := f.DecodePath(strings.TrimLeft(strings.Replace(folder.FolderPath, f.EncodePath(f.root), "", 1), "/"))
|
||||
res = append(res, fs.NewDir(folderPath, folder.UpdatedAt))
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
res = append(res, f.newObject(ctx, remote, file))
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (f *Fs) newObject(ctx context.Context, remote string, file client.File) *Object {
|
||||
remoteFile := strings.TrimLeft(strings.Replace(file.FilePath, f.EncodePath(f.root), "", 1), "/")
|
||||
|
||||
folderPath, fileName := path.Split(remoteFile)
|
||||
|
||||
folderPath = f.DecodePath(folderPath)
|
||||
fileName = f.DecodeFileName(fileName)
|
||||
|
||||
remoteFile = path.Join(folderPath, fileName)
|
||||
|
||||
if file.Type == "file-version" {
|
||||
remoteFile = version.Add(remoteFile, file.UpdatedAt)
|
||||
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remoteFile,
|
||||
filePath: file.FilePath,
|
||||
contentType: file.Mime,
|
||||
timestamp: file.UpdatedAt,
|
||||
file: file,
|
||||
versionID: file.VersionInfo["id"],
|
||||
}
|
||||
}
|
||||
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remoteFile,
|
||||
filePath: file.FilePath,
|
||||
contentType: file.Mime,
|
||||
timestamp: file.UpdatedAt,
|
||||
file: file,
|
||||
}
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error ErrorObjectNotFound.
|
||||
//
|
||||
// If remote points to a directory then it should return
|
||||
// ErrorIsDir if possible without doing any extra work,
|
||||
// otherwise ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
r := path.Join(f.root, remote)
|
||||
|
||||
folderPath, fileName := path.Split(r)
|
||||
|
||||
folderPath = f.EncodePath(folderPath)
|
||||
fileName = f.EncodeFileName(fileName)
|
||||
|
||||
isFolder, err := f.getFolderByName(ctx, folderPath, fileName)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if isFolder != nil {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
|
||||
file := f.getFileByName(ctx, folderPath, fileName)
|
||||
|
||||
if file == nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
return f.newObject(ctx, r, *file), nil
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
|
||||
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
|
||||
// return an error or upload it properly (rather than e.g. calling panic).
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return uploadFile(ctx, f, in, src.Remote(), options...)
|
||||
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
|
||||
//
|
||||
// Shouldn't return an error if it already exists
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
remote := path.Join(f.root, dir)
|
||||
parentFolderPath, folderName := path.Split(remote)
|
||||
|
||||
parentFolderPath = f.EncodePath(parentFolderPath)
|
||||
folderName = f.EncodeFileName(folderName)
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var res *http.Response
|
||||
res, err = f.ik.CreateFolder(ctx, client.CreateFolderParam{
|
||||
ParentFolderPath: parentFolderPath,
|
||||
FolderName: folderName,
|
||||
})
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
||||
|
||||
entries, err := f.List(ctx, dir)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(entries) > 0 {
|
||||
return errors.New("directory is not empty")
|
||||
}
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var res *http.Response
|
||||
res, err = f.ik.DeleteFolder(ctx, client.DeleteFolderParam{
|
||||
FolderPath: f.EncodePath(path.Join(f.root, dir)),
|
||||
})
|
||||
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
return false, fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Purge deletes all the files and the container
|
||||
//
|
||||
// Optional interface: Only implement this if you have a way of
|
||||
// deleting all the files quicker than just running Remove() on the
|
||||
// result of List()
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
|
||||
|
||||
remote := path.Join(f.root, dir)
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var res *http.Response
|
||||
res, err = f.ik.DeleteFolder(ctx, client.DeleteFolderParam{
|
||||
FolderPath: f.EncodePath(remote),
|
||||
})
|
||||
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
return false, fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
||||
|
||||
duration := time.Duration(math.Abs(float64(expire)))
|
||||
|
||||
expireSeconds := duration.Seconds()
|
||||
|
||||
fileRemote := path.Join(f.root, remote)
|
||||
|
||||
folderPath, fileName := path.Split(fileRemote)
|
||||
folderPath = f.EncodePath(folderPath)
|
||||
fileName = f.EncodeFileName(fileName)
|
||||
|
||||
file := f.getFileByName(ctx, folderPath, fileName)
|
||||
|
||||
if file == nil {
|
||||
return "", fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
// Pacer not needed as this doesn't use the API
|
||||
url, err := f.ik.URL(client.URLParam{
|
||||
Src: file.URL,
|
||||
Signed: *file.IsPrivateFile || f.opt.OnlySigned,
|
||||
ExpireSeconds: int64(expireSeconds),
|
||||
QueryParameters: map[string]string{
|
||||
"updatedAt": file.UpdatedAt.String(),
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return url, nil
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Storable says whether this object can be stored
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// String returns a description of the Object
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.file.Name
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// ModTime returns the modification date of the file
|
||||
// It should return a best guess if one isn't available
|
||||
func (o *Object) ModTime(context.Context) time.Time {
|
||||
return o.file.UpdatedAt
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
return int64(o.file.Size)
|
||||
}
|
||||
|
||||
// MimeType returns the MIME type of the file
|
||||
func (o *Object) MimeType(context.Context) string {
|
||||
return o.contentType
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
// Offset and Count for range download
|
||||
var offset int64
|
||||
var count int64
|
||||
|
||||
fs.FixRangeOption(options, -1)
|
||||
partialContent := false
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
offset, count = x.Decode(-1)
|
||||
partialContent = true
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
partialContent = true
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Pacer not needed as this doesn't use the API
|
||||
url, err := o.fs.ik.URL(client.URLParam{
|
||||
Src: o.file.URL,
|
||||
Signed: *o.file.IsPrivateFile || o.fs.opt.OnlySigned,
|
||||
QueryParameters: map[string]string{
|
||||
"tr": "orig-true",
|
||||
"updatedAt": o.file.UpdatedAt.String(),
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := &http.Client{}
|
||||
req, _ := http.NewRequest("GET", url, nil)
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+count-1))
|
||||
resp, err := client.Do(req)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
end := resp.ContentLength
|
||||
|
||||
if partialContent && resp.StatusCode == http.StatusOK {
|
||||
skip := offset
|
||||
|
||||
if offset < 0 {
|
||||
skip = end + offset + 1
|
||||
}
|
||||
|
||||
_, err = io.CopyN(io.Discard, resp.Body, skip)
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
_ = resp.Body.Close()
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return readers.NewLimitedReadCloser(resp.Body, end-skip), nil
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
//
|
||||
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
|
||||
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
|
||||
// return an error or update the object properly (rather than e.g. calling panic).
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
|
||||
srcRemote := o.Remote()
|
||||
|
||||
remote := path.Join(o.fs.root, srcRemote)
|
||||
folderPath, fileName := path.Split(remote)
|
||||
|
||||
|
||||
var resp *client.UploadResult
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
var res *http.Response
|
||||
res, resp, err = o.fs.ik.Upload(ctx, in, client.UploadParam{
|
||||
FileName: fileName,
|
||||
Folder: folderPath,
|
||||
IsPrivateFile: o.file.IsPrivateFile,
|
||||
})
|
||||
|
||||
return o.fs.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fileID := resp.FileID
|
||||
|
||||
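	// Fetch the full file record for the newly uploaded file so the object's
	// metadata reflects what the server actually stored.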
_, file, err := o.fs.ik.File(ctx, fileID)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
o.file = *file
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove this object
|
||||
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
var res *http.Response
|
||||
res, err = o.fs.ik.DeleteFile(ctx, o.file.FileID)
|
||||
|
||||
return o.fs.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// SetModTime sets the metadata on the object to set the modification date
|
||||
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
|
||||
func uploadFile(ctx context.Context, f *Fs, in io.Reader, srcRemote string, options ...fs.OpenOption) (fs.Object, error) {
|
||||
remote := path.Join(f.root, srcRemote)
|
||||
folderPath, fileName := path.Split(remote)
|
||||
|
||||
folderPath = f.EncodePath(folderPath)
|
||||
fileName = f.EncodeFileName(fileName)
|
||||
|
||||
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var res *http.Response
|
||||
var err error
|
||||
res, _, err = f.ik.Upload(ctx, in, client.UploadParam{
|
||||
FileName: fileName,
|
||||
Folder: folderPath,
|
||||
IsPrivateFile: &f.opt.OnlySigned,
|
||||
})
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return f.NewObject(ctx, srcRemote)
|
||||
}
|
||||
|
||||
// Metadata returns the metadata for the object
|
||||
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
|
||||
|
||||
metadata.Set("btime", o.file.CreatedAt.Format(time.RFC3339))
|
||||
metadata.Set("size", strconv.FormatUint(o.file.Size, 10))
|
||||
metadata.Set("file-type", o.file.FileType)
|
||||
metadata.Set("height", strconv.Itoa(o.file.Height))
|
||||
metadata.Set("width", strconv.Itoa(o.file.Width))
|
||||
metadata.Set("has-alpha", strconv.FormatBool(o.file.HasAlpha))
|
||||
|
||||
for k, v := range o.file.EmbeddedMetadata {
|
||||
metadata.Set(k, fmt.Sprint(v))
|
||||
}
|
||||
|
||||
if o.file.Tags != nil {
|
||||
metadata.Set("tags", strings.Join(o.file.Tags, ","))
|
||||
}
|
||||
|
||||
if o.file.CustomCoordinates != nil {
|
||||
metadata.Set("custom-coordinates", *o.file.CustomCoordinates)
|
||||
}
|
||||
|
||||
if o.file.IsPrivateFile != nil {
|
||||
metadata.Set("is-private-file", strconv.FormatBool(*o.file.IsPrivateFile))
|
||||
}
|
||||
|
||||
if o.file.AITags != nil {
|
||||
googleTags := []string{}
|
||||
awsTags := []string{}
|
||||
|
||||
for _, tag := range o.file.AITags {
|
||||
if tag.Source == "google-auto-tagging" {
|
||||
googleTags = append(googleTags, tag.Name)
|
||||
} else if tag.Source == "aws-auto-tagging" {
|
||||
awsTags = append(awsTags, tag.Name)
|
||||
}
|
||||
}
|
||||
|
||||
if len(googleTags) > 0 {
|
||||
metadata.Set("google-tags", strings.Join(googleTags, ","))
|
||||
}
|
||||
|
||||
if len(awsTags) > 0 {
|
||||
metadata.Set("aws-tags", strings.Join(awsTags, ","))
|
||||
}
|
||||
}
|
||||
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
//
|
||||
// It returns the destination Object and a possible error.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
		return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
file, err := srcObj.Open(ctx)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return uploadFile(ctx, f, file, remote)
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied.
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.PublicLinker = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.Copier = &Fs{}
|
||||
)
|
||||
backend/imagekit/imagekit_test.go (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
package imagekit
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestIntegration(t *testing.T) {
|
||||
debug := true
|
||||
fstest.Verbose = &debug
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestImageKit:",
|
||||
NilObject: (*Object)(nil),
|
||||
SkipFsCheckWrap: true,
|
||||
})
|
||||
}
|
||||
backend/imagekit/util.go (new file, 193 lines)
@@ -0,0 +1,193 @@
|
||||
package imagekit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/imagekit/client"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
)
|
||||
|
||||
func (f *Fs) getFiles(ctx context.Context, path string, includeVersions bool) (files []client.File, err error) {
|
||||
|
||||
files = make([]client.File, 0)
|
||||
|
||||
var hasMore = true
|
||||
|
||||
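	// Page through the listing 100 entries at a time, stopping once a short
	// (or empty) page is returned.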
for hasMore {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var data *[]client.File
|
||||
var res *http.Response
|
||||
res, data, err = f.ik.Files(ctx, client.FilesOrFolderParam{
|
||||
Skip: len(files),
|
||||
Limit: 100,
|
||||
Path: path,
|
||||
}, includeVersions)
|
||||
|
||||
			if err != nil {
				return f.shouldRetry(ctx, res, err)
			}

			hasMore = !(len(*data) == 0 || len(*data) < 100)
|
||||
|
||||
if len(*data) > 0 {
|
||||
files = append(files, *data...)
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return make([]client.File, 0), err
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (f *Fs) getFolders(ctx context.Context, path string) (folders []client.Folder, err error) {
|
||||
|
||||
folders = make([]client.Folder, 0)
|
||||
|
||||
var hasMore = true
|
||||
|
||||
for hasMore {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var data *[]client.Folder
|
||||
var res *http.Response
|
||||
res, data, err = f.ik.Folders(ctx, client.FilesOrFolderParam{
|
||||
Skip: len(folders),
|
||||
Limit: 100,
|
||||
Path: path,
|
||||
})
|
||||
|
||||
			if err != nil {
				return f.shouldRetry(ctx, res, err)
			}

			hasMore = !(len(*data) == 0 || len(*data) < 100)
|
||||
|
||||
if len(*data) > 0 {
|
||||
folders = append(folders, *data...)
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return make([]client.Folder, 0), err
|
||||
}
|
||||
|
||||
return folders, nil
|
||||
}
|
||||
|
||||
func (f *Fs) getFileByName(ctx context.Context, path string, name string) (file *client.File) {
|
||||
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
res, data, err := f.ik.Files(ctx, client.FilesOrFolderParam{
|
||||
Limit: 1,
|
||||
Path: path,
|
||||
SearchQuery: fmt.Sprintf(`type = "file" AND name = %s`, strconv.Quote(name)),
|
||||
}, false)
|
||||
|
||||
		if err != nil {
			return f.shouldRetry(ctx, res, err)
		}

		if len(*data) == 0 {
|
||||
file = nil
|
||||
} else {
|
||||
file = &(*data)[0]
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return file
|
||||
}
|
||||
|
||||
func (f *Fs) getFolderByName(ctx context.Context, path string, name string) (folder *client.Folder, err error) {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
res, data, err := f.ik.Folders(ctx, client.FilesOrFolderParam{
|
||||
Limit: 1,
|
||||
Path: path,
|
||||
SearchQuery: fmt.Sprintf(`type = "folder" AND name = %s`, strconv.Quote(name)),
|
||||
})
|
||||
|
||||
		if err != nil {
			return f.shouldRetry(ctx, res, err)
		}

		if len(*data) == 0 {
|
||||
folder = nil
|
||||
} else {
|
||||
folder = &(*data)[0]
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return folder, nil
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
401, // Unauthorized (e.g. "Token has expired")
|
||||
408, // Request Timeout
|
||||
429, // Rate exceeded.
|
||||
500, // Get occasional 500 Internal Server Error
|
||||
503, // Service Unavailable
|
||||
504, // Gateway Time-out
|
||||
}
|
||||
|
||||
func shouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
|
||||
if resp == nil {
|
||||
return false
|
||||
}
|
||||
for _, e := range retryErrorCodes {
|
||||
if resp.StatusCode == e {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) {
|
||||
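		// ImageKit reports rate limiting via the X-RateLimit-Reset header; the
		// value is treated below as the number of milliseconds to wait before
		// retrying.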
var retryAfter = 1
|
||||
retryAfterString := resp.Header.Get("X-RateLimit-Reset")
|
||||
if retryAfterString != "" {
|
||||
var err error
|
||||
retryAfter, err = strconv.Atoi(retryAfterString)
|
||||
if err != nil {
|
||||
fs.Errorf(f, "Malformed %s header %q: %v", "X-RateLimit-Reset", retryAfterString, err)
|
||||
}
|
||||
}
|
||||
|
||||
return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Millisecond)
|
||||
}
|
||||
|
||||
return fserrors.ShouldRetry(err) || shouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// EncodePath encapsulates the logic for encoding a path
|
||||
func (f *Fs) EncodePath(str string) string {
|
||||
return f.opt.Enc.FromStandardPath(str)
|
||||
}
|
||||
|
||||
// DecodePath encapsulates the logic for decoding a path
|
||||
func (f *Fs) DecodePath(str string) string {
|
||||
return f.opt.Enc.ToStandardPath(str)
|
||||
}
|
||||
|
||||
// EncodeFileName encapsulates the logic for encoding a file name
|
||||
func (f *Fs) EncodeFileName(str string) string {
|
||||
return f.opt.Enc.FromStandardName(str)
|
||||
}
|
||||
|
||||
// DecodeFileName encapsulates the logic for decoding a file name
|
||||
func (f *Fs) DecodeFileName(str string) string {
|
||||
return f.opt.Enc.ToStandardName(str)
|
||||
}
|
||||
@@ -802,7 +802,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
|
||||
}
|
||||
var mdata fs.Metadata
|
||||
mdata, err = fs.GetMetadataOptions(ctx, src, options)
|
||||
mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options)
|
||||
if err == nil && mdata != nil {
|
||||
for mk, mv := range mdata {
|
||||
mk = strings.ToLower(mk)
|
||||
|
||||
@@ -1680,6 +1680,12 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown shutdown the fs
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
f.tokenRenewer.Shutdown()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.MD5)
|
||||
@@ -1944,7 +1950,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
in = wrap(in)
|
||||
}
|
||||
// Fetch metadata if --metadata is in use
|
||||
meta, err := fs.GetMetadataOptions(ctx, src, options)
|
||||
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
}
|
||||
@@ -2104,6 +2110,7 @@ var (
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.UserInfoer = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
_ fs.Metadataer = (*Object)(nil)
|
||||
|
||||
backend/linkbox/linkbox.go (new file, 897 lines)
@@ -0,0 +1,897 @@
|
||||
// Package linkbox provides an interface to the linkbox.to Cloud storage system.
|
||||
//
|
||||
// API docs: https://www.linkbox.to/api-docs
|
||||
package linkbox
|
||||
|
||||
/*
|
||||
Extras
|
||||
- PublicLink - NO - sharing doesn't share the actual file, only a page with it on
|
||||
- Move - YES - have Move and Rename file APIs so is possible
|
||||
- MoveDir - NO - probably not possible - have Move but no Rename
|
||||
*/
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
maxEntitiesPerPage = 1024
|
||||
minSleep = 200 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
pacerBurst = 1
|
||||
linkboxAPIURL = "https://www.linkbox.to/api/open/"
|
||||
rootID = "0" // ID of root directory
|
||||
)
|
||||
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "linkbox",
|
||||
Description: "Linkbox",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "token",
|
||||
Help: "Token from https://www.linkbox.to/admin/account",
|
||||
Sensitive: true,
|
||||
Required: true,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Token string `config:"token"`
|
||||
}
|
||||
|
||||
// Fs stores the interface to the remote Linkbox files
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
opt Options // options for this backend
|
||||
features *fs.Features // optional features
|
||||
ci *fs.ConfigInfo // global config
|
||||
srv *rest.Client // the connection to the server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer
|
||||
}
|
||||
|
||||
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
remote string
|
||||
size int64
|
||||
modTime time.Time
|
||||
contentType string
|
||||
fullURL string
|
||||
dirID int64
|
||||
itemID string // and these IDs are for files
|
||||
id int64 // these IDs appear to apply to directories
|
||||
isDir bool
|
||||
}
|
||||
|
||||
// NewFs creates a new Fs object from the name and root. It connects to
|
||||
// the host specified in the config file.
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
root = strings.Trim(root, "/")
|
||||
// Parse config into Options struct
|
||||
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ci := fs.GetConfig(ctx)
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
root: root,
|
||||
ci: ci,
|
||||
srv: rest.NewClient(fshttp.NewClient(ctx)),
|
||||
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep))),
|
||||
}
|
||||
f.dirCache = dircache.New(root, rootID, f)
|
||||
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
CaseInsensitive: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
// Find the current root
|
||||
err = f.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
tempF := *f
|
||||
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
|
||||
tempF.root = newRoot
|
||||
// Make new Fs which is the parent
|
||||
err = tempF.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
_, err := tempF.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
f.features.Fill(ctx, &tempF)
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/rclone/rclone/issues/2182
|
||||
f.dirCache = tempF.dirCache
|
||||
f.root = tempF.root
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
type entity struct {
|
||||
Type string `json:"type"`
|
||||
Name string `json:"name"`
|
||||
URL string `json:"url"`
|
||||
Ctime int64 `json:"ctime"`
|
||||
Size int64 `json:"size"`
|
||||
ID int64 `json:"id"`
|
||||
Pid int64 `json:"pid"`
|
||||
ItemID string `json:"item_id"`
|
||||
}
|
||||
|
||||
// Return true if the entity is a directory
|
||||
func (e *entity) isDir() bool {
|
||||
return e.Type == "dir" || e.Type == "sdir"
|
||||
}
|
||||
|
||||
type data struct {
|
||||
Entities []entity `json:"list"`
|
||||
}
|
||||
type fileSearchRes struct {
|
||||
response
|
||||
SearchData data `json:"data"`
|
||||
}
|
||||
|
||||
// Set an object info from an entity
|
||||
func (o *Object) set(e *entity) {
|
||||
o.modTime = time.Unix(e.Ctime, 0)
|
||||
o.contentType = e.Type
|
||||
o.size = e.Size
|
||||
o.fullURL = e.URL
|
||||
o.isDir = e.isDir()
|
||||
o.id = e.ID
|
||||
o.itemID = e.ItemID
|
||||
o.dirID = e.Pid
|
||||
}
|
||||
|
||||
// Call linkbox with the query in opts and return result
|
||||
//
|
||||
// This will be checked for error and an error will be returned if Status != 1
|
||||
func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result interface{}) error {
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.CallJSON(ctx, opts, nil, &result)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
responser := result.(responser)
|
||||
if responser.IsError() {
|
||||
return responser
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// list the objects into the function supplied
|
||||
//
|
||||
// If directories is set it only sends directories
|
||||
// User function to process a File item from listAll
|
||||
//
|
||||
// Should return true to finish processing
|
||||
type listAllFn func(*entity) bool
|
||||
|
||||
// Search is a bit fussy about which characters match
|
||||
//
|
||||
// If the name doesn't match this then do a dir list instead
|
||||
var searchOK = regexp.MustCompile(`^[a-zA-Z0-9_ .]+$`)
|
||||
|
||||
// Lists the directory required calling the user function on each item found
|
||||
//
|
||||
// If the user fn ever returns true then it early exits with found = true
|
||||
//
|
||||
// If you set name then search ignores dirID. name is a substring
|
||||
// search also so name="dir" matches "sub dir" also. This filters it
|
||||
// down so it only returns items in dirID
|
||||
func (f *Fs) listAll(ctx context.Context, dirID string, name string, fn listAllFn) (found bool, err error) {
|
||||
var (
|
||||
pageNumber = 0
|
||||
numberOfEntities = maxEntitiesPerPage
|
||||
)
|
||||
name = strings.TrimSpace(name) // search doesn't like spaces
|
||||
if !searchOK.MatchString(name) {
|
||||
// If name isn't good then do an unbounded search
|
||||
name = ""
|
||||
}
|
||||
OUTER:
|
||||
for numberOfEntities == maxEntitiesPerPage {
|
||||
pageNumber++
|
||||
opts := &rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: linkboxAPIURL,
|
||||
Path: "file_search",
|
||||
Parameters: url.Values{
|
||||
"token": {f.opt.Token},
|
||||
"name": {name},
|
||||
"pid": {dirID},
|
||||
"pageNo": {itoa(pageNumber)},
|
||||
"pageSize": {itoa64(maxEntitiesPerPage)},
|
||||
},
|
||||
}
|
||||
|
||||
var responseResult fileSearchRes
|
||||
err = getUnmarshaledResponse(ctx, f, opts, &responseResult)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("getting files failed: %w", err)
|
||||
}
|
||||
|
||||
numberOfEntities = len(responseResult.SearchData.Entities)
|
||||
|
||||
for _, entity := range responseResult.SearchData.Entities {
|
||||
if itoa64(entity.Pid) != dirID {
|
||||
// when name != "" this returns from all directories, so ignore not this one
|
||||
continue
|
||||
}
|
||||
if fn(&entity) {
|
||||
found = true
|
||||
break OUTER
|
||||
}
|
||||
}
|
||||
if pageNumber > 100000 {
|
||||
return false, fmt.Errorf("too many results")
|
||||
}
|
||||
}
|
||||
return found, nil
|
||||
}
|
||||
|
||||
// Turn 64 bit int to string
|
||||
func itoa64(i int64) string {
|
||||
return strconv.FormatInt(i, 10)
|
||||
}
|
||||
|
||||
// Turn int to string
|
||||
func itoa(i int) string {
|
||||
return itoa64(int64(i))
|
||||
}
|
||||
|
||||
func splitDirAndName(remote string) (dir string, name string) {
|
||||
lastSlashPosition := strings.LastIndex(remote, "/")
|
||||
if lastSlashPosition == -1 {
|
||||
dir = ""
|
||||
name = remote
|
||||
} else {
|
||||
dir = remote[:lastSlashPosition]
|
||||
name = remote[lastSlashPosition+1:]
|
||||
}
|
||||
|
||||
// fs.Debugf(nil, "splitDirAndName remote = {%s}, dir = {%s}, name = {%s}", remote, dir, name)
|
||||
|
||||
return dir, name
|
||||
}
|
||||
|
||||
// FindLeaf finds a directory of name leaf in the folder with ID directoryID
|
||||
func (f *Fs) FindLeaf(ctx context.Context, directoryID, leaf string) (directoryIDOut string, found bool, err error) {
|
||||
// Find the leaf in directoryID
|
||||
found, err = f.listAll(ctx, directoryID, leaf, func(entity *entity) bool {
|
||||
if entity.isDir() && strings.EqualFold(entity.Name, leaf) {
|
||||
directoryIDOut = itoa64(entity.ID)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
return directoryIDOut, found, err
|
||||
}
|
||||
|
||||
// Returned from "folder_create"
|
||||
type folderCreateRes struct {
|
||||
response
|
||||
Data struct {
|
||||
DirID int64 `json:"dirId"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// CreateDir makes a directory with dirID as parent and name leaf
|
||||
func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, err error) {
|
||||
// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
|
||||
opts := &rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: linkboxAPIURL,
|
||||
Path: "folder_create",
|
||||
Parameters: url.Values{
|
||||
"token": {f.opt.Token},
|
||||
"name": {leaf},
|
||||
"pid": {dirID},
|
||||
"isShare": {"0"},
|
||||
"canInvite": {"1"},
|
||||
"canShare": {"1"},
|
||||
"withBodyImg": {"1"},
|
||||
"desc": {""},
|
||||
},
|
||||
}
|
||||
|
||||
response := folderCreateRes{}
|
||||
err = getUnmarshaledResponse(ctx, f, opts, &response)
|
||||
if err != nil {
|
||||
// response status 1501 means that directory already exists
|
||||
if response.Status == 1501 {
|
||||
return newID, fmt.Errorf("couldn't find already created directory: %w", fs.ErrorDirNotFound)
|
||||
}
|
||||
return newID, fmt.Errorf("CreateDir failed: %w", err)
|
||||
}
|
||||
if response.Data.DirID == 0 {
|
||||
return newID, fmt.Errorf("API returned 0 for ID of newly created directory")
|
||||
}
|
||||
return itoa64(response.Data.DirID), nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
// fs.Debugf(f, "List method dir = {%s}", dir)
|
||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = f.listAll(ctx, directoryID, "", func(entity *entity) bool {
|
||||
remote := path.Join(dir, entity.Name)
|
||||
if entity.isDir() {
|
||||
id := itoa64(entity.ID)
|
||||
modTime := time.Unix(entity.Ctime, 0)
|
||||
d := fs.NewDir(remote, modTime).SetID(id).SetParentID(itoa64(entity.Pid))
|
||||
entries = append(entries, d)
|
||||
// cache the directory ID for later lookups
|
||||
f.dirCache.Put(remote, id)
|
||||
} else {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
o.set(entity)
|
||||
entries = append(entries, o)
|
||||
}
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// get an entity with leaf from dirID
|
||||
func getEntity(ctx context.Context, f *Fs, leaf string, directoryID string, token string) (*entity, error) {
|
||||
var result *entity
|
||||
var resultErr = fs.ErrorObjectNotFound
|
||||
_, err := f.listAll(ctx, directoryID, leaf, func(entity *entity) bool {
|
||||
if strings.EqualFold(entity.Name, leaf) {
|
||||
// fs.Debugf(f, "getObject found entity.Name {%s} name {%s}", entity.Name, name)
|
||||
if entity.isDir() {
|
||||
result = nil
|
||||
resultErr = fs.ErrorIsDir
|
||||
} else {
|
||||
result = entity
|
||||
resultErr = nil
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, resultErr
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error ErrorObjectNotFound.
|
||||
//
|
||||
// If remote points to a directory then it should return
|
||||
// ErrorIsDir if possible without doing any extra work,
|
||||
// otherwise ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
leaf, dirID, err := f.dirCache.FindPath(ctx, remote, false)
|
||||
if err != nil {
|
||||
if err == fs.ErrorDirNotFound {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
entity, err := getEntity(ctx, f, leaf, dirID, f.opt.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
o.set(entity)
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
|
||||
//
|
||||
// Shouldn't return an error if it already exists
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
_, err := f.dirCache.FindDir(ctx, dir, true)
|
||||
return err
|
||||
}
|
||||
|
||||
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
if check {
|
||||
entries, err := f.List(ctx, dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(entries) != 0 {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
}
|
||||
|
||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts := &rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: linkboxAPIURL,
|
||||
Path: "folder_del",
|
||||
Parameters: url.Values{
|
||||
"token": {f.opt.Token},
|
||||
"dirIds": {directoryID},
|
||||
},
|
||||
}
|
||||
|
||||
response := response{}
|
||||
err = getUnmarshaledResponse(ctx, f, opts, &response)
|
||||
if err != nil {
|
||||
// Linkbox has some odd error returns here
|
||||
if response.Status == 403 || response.Status == 500 {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
return fmt.Errorf("purge error: %w", err)
|
||||
}
|
||||
|
||||
f.dirCache.FlushDir(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return f.purgeCheck(ctx, dir, true)
|
||||
}
|
||||
|
||||
// SetModTime sets modTime on a particular file
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
var res *http.Response
|
||||
downloadURL := o.fullURL
|
||||
if downloadURL == "" {
|
||||
_, name := splitDirAndName(o.Remote())
|
||||
newObject, err := getEntity(ctx, o.fs, name, itoa64(o.dirID), o.fs.opt.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if newObject == nil {
|
||||
// fs.Debugf(o.fs, "Open entity is empty: name = {%s}", name)
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
downloadURL = newObject.URL
|
||||
}
|
||||
|
||||
opts := &rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: downloadURL,
|
||||
Options: options,
|
||||
}
|
||||
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
res, err = o.fs.srv.Call(ctx, opts)
|
||||
return o.fs.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Open failed: %w", err)
|
||||
}
|
||||
|
||||
return res.Body, nil
|
||||
}
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
//
|
||||
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
|
||||
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
|
||||
// return an error or update the object properly (rather than e.g. calling panic).
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
size := src.Size()
|
||||
if size == 0 {
|
||||
return fs.ErrorCantUploadEmptyFiles
|
||||
} else if size < 0 {
|
||||
return fmt.Errorf("can't upload files of unknown length")
|
||||
}
|
||||
|
||||
remote := o.Remote()
|
||||
|
||||
// remove the file if it exists
|
||||
if o.itemID != "" {
|
||||
fs.Debugf(o, "Update: removing old file")
|
||||
err = o.Remove(ctx)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Update: failed to remove existing file: %v", err)
|
||||
}
|
||||
o.itemID = ""
|
||||
} else {
|
||||
tmpObject, err := o.fs.NewObject(ctx, remote)
|
||||
if err == nil {
|
||||
fs.Debugf(o, "Update: removing old file")
|
||||
err = tmpObject.Remove(ctx)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Update: failed to remove existing file: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
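	// Linkbox appears to identify uploads by the MD5 of their first 10 MiB plus
	// the file size (the fileMd5ofPre10m parameter below), so buffer that prefix
	// here and replay it ahead of the rest of the stream when uploading.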
first10m := io.LimitReader(in, 10_485_760)
|
||||
first10mBytes, err := io.ReadAll(first10m)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Update err in reading file: %w", err)
|
||||
}
|
||||
|
||||
// get upload authorization (step 1)
|
||||
opts := &rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: linkboxAPIURL,
|
||||
Path: "get_upload_url",
|
||||
Options: options,
|
||||
Parameters: url.Values{
|
||||
"token": {o.fs.opt.Token},
|
||||
"fileMd5ofPre10m": {fmt.Sprintf("%x", md5.Sum(first10mBytes))},
|
||||
"fileSize": {itoa64(size)},
|
||||
},
|
||||
}
|
||||
|
||||
getFirstStepResult := getUploadURLResponse{}
|
||||
err = getUnmarshaledResponse(ctx, o.fs, opts, &getFirstStepResult)
|
||||
if err != nil {
|
||||
if getFirstStepResult.Status != 600 {
|
||||
return fmt.Errorf("Update err in unmarshaling response: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
switch getFirstStepResult.Status {
|
||||
case 1:
|
||||
// upload file using link from first step
|
||||
var res *http.Response
|
||||
|
||||
file := io.MultiReader(bytes.NewReader(first10mBytes), in)
|
||||
|
||||
opts := &rest.Opts{
|
||||
Method: "PUT",
|
||||
RootURL: getFirstStepResult.Data.SignURL,
|
||||
Options: options,
|
||||
Body: file,
|
||||
ContentLength: &size,
|
||||
}
|
||||
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
res, err = o.fs.srv.Call(ctx, opts)
|
||||
return o.fs.shouldRetry(ctx, res, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("update err in uploading file: %w", err)
|
||||
}
|
||||
|
||||
_, err = io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("update err in reading response: %w", err)
|
||||
}
|
||||
|
||||
case 600:
|
||||
// Status means that we don't need to upload file
|
||||
// We need only to make second step
|
||||
default:
|
||||
return fmt.Errorf("got unexpected message from Linkbox: %s", getFirstStepResult.Message)
|
||||
}
|
||||
|
||||
leaf, dirID, err := o.fs.dirCache.FindPath(ctx, remote, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// create file item at Linkbox (second step)
|
||||
opts = &rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: linkboxAPIURL,
|
||||
Path: "folder_upload_file",
|
||||
Options: options,
|
||||
Parameters: url.Values{
|
||||
"token": {o.fs.opt.Token},
|
||||
"fileMd5ofPre10m": {fmt.Sprintf("%x", md5.Sum(first10mBytes))},
|
||||
"fileSize": {itoa64(size)},
|
||||
"pid": {dirID},
|
||||
"diyName": {leaf},
|
||||
},
|
||||
}
|
||||
|
||||
getSecondStepResult := getUploadURLResponse{}
|
||||
err = getUnmarshaledResponse(ctx, o.fs, opts, &getSecondStepResult)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Update second step failed: %w", err)
|
||||
}
|
||||
|
||||
// Try a few times to read the object after upload for eventual consistency
|
||||
const maxTries = 10
|
||||
var sleepTime = 100 * time.Millisecond
|
||||
var entity *entity
|
||||
for try := 1; try <= maxTries; try++ {
|
||||
entity, err = getEntity(ctx, o.fs, leaf, dirID, o.fs.opt.Token)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if err != fs.ErrorObjectNotFound {
|
||||
return fmt.Errorf("Update failed to read object: %w", err)
|
||||
}
|
||||
fs.Debugf(o, "Trying to read object after upload: try again in %v (%d/%d)", sleepTime, try, maxTries)
|
||||
time.Sleep(sleepTime)
|
||||
sleepTime *= 2
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.set(entity)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove this object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
opts := &rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: linkboxAPIURL,
|
||||
Path: "file_del",
|
||||
Parameters: url.Values{
|
||||
"token": {o.fs.opt.Token},
|
||||
"itemIds": {o.itemID},
|
||||
},
|
||||
}
|
||||
requestResult := getUploadURLResponse{}
|
||||
err := getUnmarshaledResponse(ctx, o.fs, opts, &requestResult)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not Remove: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the remote http file
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// Remote the name of the remote HTTP file, relative to the fs root
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Size returns the size in bytes of the remote http file
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// String returns the URL to the remote HTTP file
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Fs is the filesystem this remote http file object is located within
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
|
||||
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Storable returns whether the remote http file is a regular file
|
||||
// (not a directory, symbolic link, block device, character device, named pipe, etc.)
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
// Info provides a read only interface to information about a filesystem.
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
// Name returns the configured name of the file system
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String returns a description of the FS
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("Linkbox root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Precision of the ModTimes in this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return fs.ModTimeNotSupported
|
||||
}
|
||||
|
||||
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
|
||||
// Returns the supported hash types of the filesystem
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.None)
|
||||
}
|
||||
|
||||
/*
|
||||
{
|
||||
"data": {
|
||||
"signUrl": "http://xx -- Then CURL PUT your file with sign url "
|
||||
},
|
||||
"msg": "please use this url to upload (PUT method)",
|
||||
"status": 1
|
||||
}
|
||||
*/
|
||||
|
||||
// All messages have these items
|
||||
type response struct {
|
||||
Message string `json:"msg"`
|
||||
Status int `json:"status"`
|
||||
}
|
||||
|
||||
// IsError returns whether response represents an error
|
||||
func (r *response) IsError() bool {
|
||||
return r.Status != 1
|
||||
}
|
||||
|
||||
// Error returns the error state of this response
|
||||
func (r *response) Error() string {
|
||||
return fmt.Sprintf("Linkbox error %d: %s", r.Status, r.Message)
|
||||
}
|
||||
|
||||
// responser is interface covering the response so we can use it when it is embedded.
|
||||
type responser interface {
|
||||
IsError() bool
|
||||
Error() string
|
||||
}
|
||||
|
||||
type getUploadURLData struct {
|
||||
SignURL string `json:"signUrl"`
|
||||
}
|
||||
|
||||
type getUploadURLResponse struct {
|
||||
response
|
||||
Data getUploadURLData `json:"data"`
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
|
||||
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
|
||||
// return an error or upload it properly (rather than e.g. calling panic).
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
size: src.Size(),
|
||||
}
|
||||
dir, _ := splitDirAndName(src.Remote())
|
||||
err := f.Mkdir(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = o.Update(ctx, in, src, options...)
|
||||
return o, err
|
||||
}
|
||||
|
||||
// Purge all files in the directory specified
|
||||
//
|
||||
// Implement this if you have a way of deleting all the files
|
||||
// quicker than just running Remove() on the result of List()
|
||||
//
|
||||
// Return an error if it doesn't exist
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
return f.purgeCheck(ctx, dir, false)
|
||||
}
|
||||
|
||||
// retryErrorCodes is a slice of error codes that we will retry
|
||||
var retryErrorCodes = []int{
|
||||
429, // Too Many Requests.
|
||||
500, // Internal Server Error
|
||||
502, // Bad Gateway
|
||||
503, // Service Unavailable
|
||||
504, // Gateway Timeout
|
||||
509, // Bandwidth Limit Exceeded
|
||||
}
|
||||
|
||||
// shouldRetry determines whether a given err rates being retried
|
||||
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
if fserrors.ContextError(ctx, &err) {
|
||||
return false, err
|
||||
}
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// DirCacheFlush resets the directory cache - used in testing as an
|
||||
// optional interface
|
||||
func (f *Fs) DirCacheFlush() {
|
||||
f.dirCache.ResetRoot()
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.DirCacheFlusher = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
)
|
||||
backend/linkbox/linkbox_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
// Test Linkbox filesystem interface
|
||||
package linkbox_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/linkbox"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestLinkbox:",
|
||||
NilObject: (*linkbox.Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -146,6 +146,11 @@ time we:
|
||||
- Only checksum the size that stat gave
|
||||
- Don't update the stat info for the file
|
||||
|
||||
**NB** do not use this flag on a Windows Volume Shadow (VSS). For some
|
||||
unknown reason, files in a VSS sometimes show different sizes from the
|
||||
directory listing (where the initial stat value comes from on Windows)
|
||||
and when stat is called on them directly. Other copy tools always use
|
||||
the direct stat value and setting this flag will disable that.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
@@ -1123,6 +1128,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
}
|
||||
}
|
||||
|
||||
// Update the file info before we start reading
|
||||
err = o.lstat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If not checking updated then limit to current size. This means if
|
||||
// file is being extended, readers will read a o.Size() bytes rather
|
||||
// than the new size making for a consistent upload.
|
||||
@@ -1287,7 +1298,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
|
||||
// Fetch and set metadata if --metadata is in use
|
||||
meta, err := fs.GetMetadataOptions(ctx, src, options)
|
||||
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
}
|
||||
@@ -1436,6 +1447,10 @@ func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
|
||||
if runtime.GOOS == "windows" {
|
||||
s = filepath.ToSlash(s)
|
||||
vol := filepath.VolumeName(s)
|
||||
if vol == `\\?` && len(s) >= 6 {
|
||||
// `\\?\C:`
|
||||
vol = s[:6]
|
||||
}
|
||||
s = vol + enc.FromStandardPath(s[len(vol):])
|
||||
s = filepath.FromSlash(s)
|
||||
if !noUNC {
|
||||
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
@@ -324,6 +325,37 @@ the --onedrive-av-override flag, or av_override = true in the config
|
||||
file.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "delta",
|
||||
Default: false,
|
||||
Help: strings.ReplaceAll(`If set rclone will use delta listing to implement recursive listings.
|
||||
|
||||
If this flag is set then the onedrive backend will advertise |ListR|
|
||||
support for recursive listings.
|
||||
|
||||
Setting this flag speeds up these things greatly:
|
||||
|
||||
rclone lsf -R onedrive:
|
||||
rclone size onedrive:
|
||||
rclone rc vfs/refresh recursive=true
|
||||
|
||||
**However** the delta listing API **only** works at the root of the
|
||||
drive. If you use it anywhere other than the root then it recurses from the root
|
||||
and discards all the data that is not under the directory you asked
|
||||
for. So it will be correct but may not be very efficient.
|
||||
|
||||
This is why this flag is not set as the default.
|
||||
|
||||
As a rule of thumb if nearly all of your data is under rclone's root
|
||||
directory (the |root/directory| in |onedrive:root/directory|) then
|
||||
using this flag will be a big performance win. If your data is
|
||||
mostly not under the root then using this flag will be a big
|
||||
performance loss.
|
||||
|
||||
It is recommended if you are mounting your onedrive at the root
|
||||
(or near the root when using crypt) and using rclone |rc vfs/refresh|.
|
||||
`, "|", "`"),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -645,6 +677,7 @@ type Options struct {
|
||||
LinkPassword string `config:"link_password"`
|
||||
HashType string `config:"hash_type"`
|
||||
AVOverride bool `config:"av_override"`
|
||||
Delta bool `config:"delta"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -656,6 +689,7 @@ type Fs struct {
|
||||
ci *fs.ConfigInfo // global config
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the OneDrive server
|
||||
unAuth *rest.Client // no authentication connection to the OneDrive server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
@@ -914,8 +948,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
TokenURL: authEndpoint[opt.Region] + tokenPath,
|
||||
}
|
||||
|
||||
client := fshttp.NewClient(ctx)
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, client)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
|
||||
}
|
||||
@@ -929,6 +964,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
driveID: opt.DriveID,
|
||||
driveType: opt.DriveType,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
unAuth: rest.NewClient(client).SetRoot(rootURL),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
hashType: QuickXorHashType,
|
||||
}
|
||||
@@ -976,6 +1012,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
|
||||
f.dirCache = dircache.New(root, rootID, f)
|
||||
|
||||
// ListR only supported if delta set
|
||||
if !f.opt.Delta {
|
||||
f.features.ListR = nil
|
||||
}
|
||||
|
||||
// Find the current root
|
||||
err = f.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
@@ -1204,10 +1245,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
}
|
||||
err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) error {
|
||||
entry, err := f.itemToDirEntry(ctx, dir, info)
|
||||
if err == nil {
|
||||
entries = append(entries, entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
if entry == nil {
|
||||
return nil
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1302,6 +1347,9 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if entry == nil {
|
||||
return nil
|
||||
}
|
||||
err = list.Add(entry)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1333,6 +1381,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
|
||||
}
|
||||
|
||||
// Shutdown shutdown the fs
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
f.tokenRenewer.Shutdown()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Creates from the parameters passed in a half finished Object which
|
||||
// must have setMetaData called on it
|
||||
//
|
||||
@@ -2195,7 +2249,7 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
|
||||
Options: options,
|
||||
}
|
||||
_, _ = chunk.Seek(skip, io.SeekStart)
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
resp, err = o.fs.unAuth.Call(ctx, &opts)
|
||||
if err != nil && resp != nil && resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
|
||||
fs.Debugf(o, "Received 416 error - reading current position from server: %v", err)
|
||||
pos, posErr := o.getPosition(ctx, url)
|
||||
@@ -2711,6 +2765,7 @@ var (
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package quickxorhash
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"hash"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -171,7 +171,9 @@ var _ hash.Hash = (*quickXorHash)(nil)
|
||||
func BenchmarkQuickXorHash(b *testing.B) {
|
||||
b.SetBytes(1 << 20)
|
||||
buf := make([]byte, 1<<20)
|
||||
rand.Read(buf)
|
||||
n, err := rand.Read(buf)
|
||||
require.NoError(b, err)
|
||||
require.Equal(b, len(buf), n)
|
||||
h := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
@@ -70,6 +70,9 @@ func newObjectStorageClient(ctx context.Context, opt *Options) (*objectstorage.O
|
||||
if opt.Region != "" {
|
||||
client.SetRegion(opt.Region)
|
||||
}
|
||||
if opt.Endpoint != "" {
|
||||
client.Host = opt.Endpoint
|
||||
}
|
||||
modifyClient(ctx, opt, &client.BaseClient)
|
||||
return &client, err
|
||||
}
|
||||
|
||||
@@ -7,12 +7,15 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -23,6 +26,7 @@ const (
|
||||
operationRename = "rename"
|
||||
operationListMultiPart = "list-multipart-uploads"
|
||||
operationCleanup = "cleanup"
|
||||
operationRestore = "restore"
|
||||
)
|
||||
|
||||
var commandHelp = []fs.CommandHelp{{
|
||||
@@ -77,6 +81,42 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
|
||||
Opts: map[string]string{
|
||||
"max-age": "Max age of upload to delete",
|
||||
},
|
||||
}, {
|
||||
Name: operationRestore,
|
||||
Short: "Restore objects from Archive to Standard storage",
|
||||
Long: `This command can be used to restore one or more objects from Archive to Standard storage.
|
||||
|
||||
Usage Examples:
|
||||
|
||||
rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
|
||||
rclone backend restore oos:bucket -o hours=HOURS
|
||||
|
||||
This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
|
||||
|
||||
rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
|
||||
|
||||
All the objects shown will be marked for restore, then
|
||||
|
||||
rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
|
||||
|
||||
It returns a list of status dictionaries with Object Name and Status
|
||||
keys. The Status will be "RESTORED"" if it was successful or an error message
|
||||
if not.
|
||||
|
||||
[
|
||||
{
|
||||
"Object": "test.txt"
|
||||
"Status": "RESTORED",
|
||||
},
|
||||
{
|
||||
"Object": "test/file4.txt"
|
||||
"Status": "RESTORED",
|
||||
}
|
||||
]
|
||||
`,
|
||||
Opts: map[string]string{
|
||||
"hours": "The number of hours for which this object will be restored. Default is 24 hrs.",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -113,6 +153,8 @@ func (f *Fs) Command(ctx context.Context, commandName string, args []string,
|
||||
}
|
||||
}
|
||||
return nil, f.cleanUp(ctx, maxAge)
|
||||
case operationRestore:
|
||||
return f.restore(ctx, opt)
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
@@ -290,3 +332,63 @@ func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPat
|
||||
}
|
||||
return uploadedParts, nil
|
||||
}
|
||||
|
||||
func (f *Fs) restore(ctx context.Context, opt map[string]string) (interface{}, error) {
|
||||
req := objectstorage.RestoreObjectsRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
RestoreObjectsDetails: objectstorage.RestoreObjectsDetails{},
|
||||
}
|
||||
if hours := opt["hours"]; hours != "" {
|
||||
ihours, err := strconv.Atoi(hours)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("bad value for hours: %w", err)
|
||||
}
|
||||
req.RestoreObjectsDetails.Hours = &ihours
|
||||
}
|
||||
type status struct {
|
||||
Object string
|
||||
Status string
|
||||
}
|
||||
var (
|
||||
outMu sync.Mutex
|
||||
out = []status{}
|
||||
err error
|
||||
)
|
||||
err = operations.ListFn(ctx, f, func(obj fs.Object) {
|
||||
// Remember this is run --checkers times concurrently
|
||||
o, ok := obj.(*Object)
|
||||
st := status{Object: obj.Remote(), Status: "RESTORED"}
|
||||
defer func() {
|
||||
outMu.Lock()
|
||||
out = append(out, st)
|
||||
outMu.Unlock()
|
||||
}()
|
||||
if !ok {
|
||||
st.Status = "Not an OCI Object Storage object"
|
||||
return
|
||||
}
|
||||
if o.storageTier == nil || (*o.storageTier != "archive") {
|
||||
st.Status = "Object not in Archive storage tier"
|
||||
return
|
||||
}
|
||||
if operations.SkipDestructive(ctx, obj, "restore") {
|
||||
return
|
||||
}
|
||||
bucket, bucketPath := o.split()
|
||||
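		// Take a copy of the request so each object can set its own bucket and
		// object name without interfering with concurrent callbacks.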
reqCopy := req
|
||||
reqCopy.BucketName = &bucket
|
||||
reqCopy.ObjectName = &bucketPath
|
||||
var response objectstorage.RestoreObjectsResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
response, err = f.srv.RestoreObjects(ctx, reqCopy)
|
||||
return shouldRetry(ctx, response.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
st.Status = err.Error()
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
@@ -295,7 +295,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
// Set the mtime in the metadata
|
||||
modTime := src.ModTime(ctx)
|
||||
// Fetch metadata if --metadata is in use
|
||||
meta, err := fs.GetMetadataOptions(ctx, src, options)
|
||||
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
|
||||
if err != nil {
|
||||
return ui, fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
}
|
||||
@@ -399,13 +399,17 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
func (o *Object) createMultipartUpload(ctx context.Context, putReq *objectstorage.PutObjectRequest) (
|
||||
uploadID string, existingParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
|
||||
bucketName, bucketPath := o.split()
|
||||
f := o.fs
|
||||
if f.opt.AttemptResumeUpload {
|
||||
err = o.fs.makeBucket(ctx, bucketName)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "failed to create bucket: %v, err: %v", bucketName, err)
|
||||
return uploadID, existingParts, err
|
||||
}
|
||||
if o.fs.opt.AttemptResumeUpload {
|
||||
fs.Debugf(o, "attempting to resume upload for %v (if any)", o.remote)
|
||||
resumeUploads, err := o.fs.findLatestMultipartUpload(ctx, bucketName, bucketPath)
|
||||
if err == nil && len(resumeUploads) > 0 {
|
||||
uploadID = *resumeUploads[0].UploadId
|
||||
existingParts, err = f.listMultipartUploadParts(ctx, bucketName, bucketPath, uploadID)
|
||||
existingParts, err = o.fs.listMultipartUploadParts(ctx, bucketName, bucketPath, uploadID)
|
||||
if err == nil {
|
||||
fs.Debugf(o, "resuming with existing upload id: %v", uploadID)
|
||||
return uploadID, existingParts, err
|
||||
|
||||
@@ -401,7 +401,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		multipart = false
 	}
 	if multipart {
-		err = o.uploadMultipart(ctx, src, in)
+		err = o.uploadMultipart(ctx, src, in, options...)
 		if err != nil {
 			return err
 		}
@@ -138,6 +138,14 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 	return
 }
 
+func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
+	}
+	return
+}
+
 // ------------------------------------------------------------
 // Implement backed that represents a remote object storage server
 // Fs is the interface a cloud storage system must provide
@@ -30,4 +30,12 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadCutoff(cs)
 }
 
-var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
+func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setCopyCutoff(cs)
+}
+
+var (
+	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
+	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
+	_ fstests.SetCopyCutoffer     = (*Fs)(nil)
+)
@@ -948,6 +948,12 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Shutdown shutdown the fs
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
f.tokenRenewer.Shutdown()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
// EU region supports SHA1 and SHA256 (but rclone doesn't
|
||||
@@ -1280,6 +1286,7 @@ var (
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -770,6 +770,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
return o.(*Object).url, nil
|
||||
}
|
||||
|
||||
// Shutdown shutdown the fs
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
f.tokenRenewer.Shutdown()
|
||||
return nil
|
||||
}
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
var resp *http.Response
|
||||
@@ -1110,6 +1116,7 @@ var (
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// OverwriteOnCopyMode is a conflict resolve mode during copy. Files with conflicting names will be overwritten
|
||||
const OverwriteOnCopyMode = "overwrite"
|
||||
// OverwriteMode is a conflict resolve mode during copy or move. Files with conflicting names will be overwritten
|
||||
const OverwriteMode = "overwrite"
|
||||
|
||||
// ProfileInfo is a profile info about quota
|
||||
type ProfileInfo struct {
|
||||
|
||||
@@ -193,6 +193,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: false,
|
||||
CanHaveEmptyDirectories: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
if f.opt.APIKey != "" {
|
||||
@@ -728,7 +729,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
Resolve: true,
|
||||
MTime: api.JSONTime(srcObj.ModTime(ctx)),
|
||||
Name: dstLeaf,
|
||||
ResolveMode: api.OverwriteOnCopyMode,
|
||||
ResolveMode: api.OverwriteMode,
|
||||
}
|
||||
|
||||
result := &api.File{}
|
||||
@@ -788,11 +789,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
|
||||
params := &api.FileCopyMoveOneParams{
|
||||
ID: srcObj.id,
|
||||
Target: directoryID,
|
||||
Resolve: false,
|
||||
MTime: api.JSONTime(srcObj.ModTime(ctx)),
|
||||
Name: dstLeaf,
|
||||
ID: srcObj.id,
|
||||
Target: directoryID,
|
||||
Resolve: true,
|
||||
MTime: api.JSONTime(srcObj.ModTime(ctx)),
|
||||
Name: dstLeaf,
|
||||
ResolveMode: api.OverwriteMode,
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
|
||||
230 backend/s3/s3.go
@@ -15,6 +15,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
@@ -60,6 +61,7 @@ import (
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/rclone/rclone/lib/version"
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// The S3 providers
|
||||
@@ -139,6 +141,9 @@ var providerOption = fs.Option{
|
||||
}, {
|
||||
Value: "RackCorp",
|
||||
Help: "RackCorp Object Storage",
|
||||
}, {
|
||||
Value: "Rclone",
|
||||
Help: "Rclone S3 Server",
|
||||
}, {
|
||||
Value: "Scaleway",
|
||||
Help: "Scaleway Object Storage",
|
||||
@@ -2181,10 +2186,10 @@ If empty it will default to the environment variable "AWS_PROFILE" or
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
Help: `Concurrency for multipart uploads and copies.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
concurrently for multipart uploads and copies.
|
||||
|
||||
If you are uploading small numbers of large files over high-speed links
|
||||
and these uploads do not fully utilize your bandwidth, then increasing
|
||||
@@ -2215,6 +2220,13 @@ If it is set then rclone will use v2 authentication.
|
||||
Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_dual_stack",
|
||||
Help: `If true use AWS S3 dual-stack endpoint (IPv6 support).
|
||||
|
||||
See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html)`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_accelerate_endpoint",
|
||||
Provider: "AWS",
|
||||
@@ -2422,6 +2434,19 @@ See [the time option docs](/docs/#time-option) for valid formats.
|
||||
`,
|
||||
Default: fs.Time{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "version_deleted",
|
||||
Help: `Show deleted file markers when using versions.
|
||||
|
||||
This shows deleted file markers in the listing when using versions. These will appear
|
||||
as 0 size files. The only operation which can be performed on them is deletion.
|
||||
|
||||
Deleting a delete marker will reveal the previous version.
|
||||
|
||||
Deleted files will always show with a timestamp.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "decompress",
|
||||
Help: `If set this will decompress gzip encoded objects.
|
||||
@@ -2488,6 +2513,45 @@ In this case, you might want to try disabling this option.
|
||||
Help: "Endpoint for STS.\n\nLeave blank if using AWS to use the default endpoint for the region.",
|
||||
Provider: "AWS",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_already_exists",
|
||||
Help: strings.ReplaceAll(`Set if rclone should report BucketAlreadyExists errors on bucket creation.
|
||||
|
||||
At some point during the evolution of the s3 protocol, AWS started
|
||||
returning an |AlreadyOwnedByYou| error when attempting to create a
|
||||
bucket that the user already owned, rather than a
|
||||
|BucketAlreadyExists| error.
|
||||
|
||||
Unfortunately exactly what has been implemented by s3 clones is a
|
||||
little inconsistent, some return |AlreadyOwnedByYou|, some return
|
||||
|BucketAlreadyExists| and some return no error at all.
|
||||
|
||||
This is important to rclone because it ensures the bucket exists by
|
||||
creating it on quite a lot of operations (unless
|
||||
|--s3-no-check-bucket| is used).
|
||||
|
||||
If rclone knows the provider can return |AlreadyOwnedByYou| or returns
|
||||
no error then it can report |BucketAlreadyExists| errors when the user
|
||||
attempts to create a bucket not owned by them. Otherwise rclone
|
||||
ignores the |BucketAlreadyExists| error which can lead to confusion.
|
||||
|
||||
This should be automatically set correctly for all providers rclone
|
||||
knows about - please make a bug report if not.
|
||||
`, "|", "`"),
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_multipart_uploads",
|
||||
Help: `Set if rclone should use multipart uploads.
|
||||
|
||||
You can change this if you want to disable the use of multipart uploads.
|
||||
This shouldn't be necessary in normal operation.
|
||||
|
||||
This should be automatically set correctly for all providers rclone
|
||||
knows about - please make a bug report if not.
|
||||
`,
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
},
|
||||
}})
|
||||
}
|
||||
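The help text above describes three provider behaviours when you recreate a bucket you already own: AlreadyOwnedByYou, BucketAlreadyExists, or no error at all. A hedged sketch of how such an error could be classified with aws-sdk-go v1's awserr package (the helper name and the trustAlreadyExists flag are illustrative only, not rclone's API):

package sketch

import (
	"github.com/aws/aws-sdk-go/aws/awserr"
)

// classifyMakeBucket decides whether a CreateBucket error means
// "already ours" (ignore), "owned by someone else" (fail), or a real error.
func classifyMakeBucket(err error, trustAlreadyExists bool) error {
	if err == nil {
		return nil
	}
	if awsErr, ok := err.(awserr.Error); ok {
		switch awsErr.Code() {
		case "BucketAlreadyOwnedByYou":
			return nil // we already own the bucket - treat as success
		case "BucketAlreadyExists", "BucketNameUnavailable":
			if !trustAlreadyExists {
				// provider is known to return this even for our own buckets
				return nil
			}
		}
	}
	return err
}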
@@ -2572,6 +2636,7 @@ type Options struct {
|
||||
Region string `config:"region"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
STSEndpoint string `config:"sts_endpoint"`
|
||||
UseDualStack bool `config:"use_dual_stack"`
|
||||
LocationConstraint string `config:"location_constraint"`
|
||||
ACL string `config:"acl"`
|
||||
BucketACL string `config:"bucket_acl"`
|
||||
@@ -2610,10 +2675,13 @@ type Options struct {
|
||||
UsePresignedRequest bool `config:"use_presigned_request"`
|
||||
Versions bool `config:"versions"`
|
||||
VersionAt fs.Time `config:"version_at"`
|
||||
VersionDeleted bool `config:"version_deleted"`
|
||||
Decompress bool `config:"decompress"`
|
||||
MightGzip fs.Tristate `config:"might_gzip"`
|
||||
UseAcceptEncodingGzip fs.Tristate `config:"use_accept_encoding_gzip"`
|
||||
NoSystemMetadata bool `config:"no_system_metadata"`
|
||||
UseAlreadyExists fs.Tristate `config:"use_already_exists"`
|
||||
UseMultipartUploads fs.Tristate `config:"use_multipart_uploads"`
|
||||
}
|
||||
|
||||
// Fs represents a remote s3 server
|
||||
@@ -2868,6 +2936,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
|
||||
case v.AccessKeyID == "" && v.SecretAccessKey == "":
|
||||
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
|
||||
cred = credentials.AnonymousCredentials
|
||||
fs.Debugf(nil, "Using anonymous credentials - did you mean to set env_auth=true?")
|
||||
case v.AccessKeyID == "":
|
||||
return nil, nil, errors.New("access_key_id not found")
|
||||
case v.SecretAccessKey == "":
|
||||
@@ -2896,6 +2965,9 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
|
||||
r.addService("sts", opt.STSEndpoint)
|
||||
awsConfig.WithEndpointResolver(r)
|
||||
}
|
||||
if opt.UseDualStack {
|
||||
awsConfig.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled
|
||||
}
|
||||
|
||||
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
|
||||
awsSessionOpts := session.Options{
|
||||
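For reference, the new use_dual_stack option maps onto the aws-sdk-go v1 setting shown in the s3Connection hunk above. A minimal sketch, assuming aws-sdk-go v1 and a placeholder region:

package sketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/session"
)

func newDualStackSession() (*session.Session, error) {
	awsConfig := aws.NewConfig().WithRegion("us-east-1")
	// Opt in to the dual-stack endpoint, e.g. s3.dualstack.us-east-1.amazonaws.com
	awsConfig.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled
	return session.NewSessionWithOptions(session.Options{Config: *awsConfig})
}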
@@ -2958,13 +3030,23 @@ func checkUploadCutoff(cs fs.SizeSuffix) error {
|
||||
}
|
||||
|
||||
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadCutoff(cs)
|
||||
if f.opt.Provider != "Rclone" {
|
||||
err = checkUploadCutoff(cs)
|
||||
}
|
||||
if err == nil {
|
||||
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
err = checkUploadChunkSize(cs)
|
||||
if err == nil {
|
||||
old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// setEndpointValueForIDriveE2 gets user region endpoint against the Access Key details by calling the API
|
||||
func setEndpointValueForIDriveE2(m configmap.Mapper) (err error) {
|
||||
value, ok := m.Get(fs.ConfigProvider)
|
||||
@@ -3012,6 +3094,8 @@ func setQuirks(opt *Options) {
|
||||
useMultipartEtag = true // Set if Etags for multpart uploads are compatible with AWS
|
||||
useAcceptEncodingGzip = true // Set Accept-Encoding: gzip
|
||||
mightGzip = true // assume all providers might use content encoding gzip until proven otherwise
|
||||
useAlreadyExists = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
|
||||
useMultipartUploads = true // Set if provider supports multipart uploads
|
||||
)
|
||||
switch opt.Provider {
|
||||
case "AWS":
|
||||
@@ -3019,18 +3103,22 @@ func setQuirks(opt *Options) {
|
||||
mightGzip = false // Never auto gzips objects
|
||||
case "Alibaba":
|
||||
useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
|
||||
useAlreadyExists = true // returns 200 OK
|
||||
case "HuaweiOBS":
|
||||
// Huawei OBS PFS is not support listObjectV2, and if turn on the urlEncodeListing, marker will not work and keep list same page forever.
|
||||
urlEncodeListings = false
|
||||
listObjectsV2 = false
|
||||
useAlreadyExists = false // untested
|
||||
case "Ceph":
|
||||
listObjectsV2 = false
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useAlreadyExists = false // untested
|
||||
case "ChinaMobile":
|
||||
listObjectsV2 = false
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useAlreadyExists = false // untested
|
||||
case "Cloudflare":
|
||||
virtualHostStyle = false
|
||||
useMultipartEtag = false // currently multipart Etags are random
|
||||
@@ -3038,88 +3126,111 @@ func setQuirks(opt *Options) {
|
||||
listObjectsV2 = false
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useAlreadyExists = false // untested
|
||||
case "DigitalOcean":
|
||||
urlEncodeListings = false
|
||||
useAlreadyExists = false // untested
|
||||
case "Dreamhost":
|
||||
urlEncodeListings = false
|
||||
useAlreadyExists = false // untested
|
||||
case "IBMCOS":
|
||||
listObjectsV2 = false // untested
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false // untested
|
||||
useAlreadyExists = false // returns BucketAlreadyExists
|
||||
case "IDrive":
|
||||
virtualHostStyle = false
|
||||
useAlreadyExists = false // untested
|
||||
case "IONOS":
|
||||
// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useAlreadyExists = false // untested
|
||||
case "Petabox":
|
||||
// No quirks
|
||||
useAlreadyExists = false // untested
|
||||
case "Liara":
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false
|
||||
useAlreadyExists = false // untested
|
||||
case "Linode":
|
||||
// No quirks
|
||||
useAlreadyExists = true // returns 200 OK
|
||||
case "LyveCloud":
|
||||
useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
|
||||
useAlreadyExists = false // untested
|
||||
case "Minio":
|
||||
virtualHostStyle = false
|
||||
case "Netease":
|
||||
listObjectsV2 = false // untested
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false // untested
|
||||
useAlreadyExists = false // untested
|
||||
case "RackCorp":
|
||||
// No quirks
|
||||
useMultipartEtag = false // untested
|
||||
useAlreadyExists = false // untested
|
||||
case "Rclone":
|
||||
listObjectsV2 = true
|
||||
urlEncodeListings = true
|
||||
virtualHostStyle = false
|
||||
useMultipartEtag = false
|
||||
useAlreadyExists = false
|
||||
// useMultipartUploads = false - set this manually
|
||||
case "Scaleway":
|
||||
// Scaleway can only have 1000 parts in an upload
|
||||
if opt.MaxUploadParts > 1000 {
|
||||
opt.MaxUploadParts = 1000
|
||||
}
|
||||
urlEncodeListings = false
|
||||
useAlreadyExists = false // untested
|
||||
case "SeaweedFS":
|
||||
listObjectsV2 = false // untested
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false // untested
|
||||
useAlreadyExists = false // untested
|
||||
case "StackPath":
|
||||
listObjectsV2 = false // untested
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useAlreadyExists = false // untested
|
||||
case "Storj":
|
||||
// Force chunk size to >= 64 MiB
|
||||
if opt.ChunkSize < 64*fs.Mebi {
|
||||
opt.ChunkSize = 64 * fs.Mebi
|
||||
}
|
||||
useAlreadyExists = false // returns BucketAlreadyExists
|
||||
case "Synology":
|
||||
useMultipartEtag = false
|
||||
useAlreadyExists = false // untested
|
||||
case "TencentCOS":
|
||||
listObjectsV2 = false // untested
|
||||
useMultipartEtag = false // untested
|
||||
useAlreadyExists = false // untested
|
||||
case "Wasabi":
|
||||
// No quirks
|
||||
useAlreadyExists = true // returns 200 OK
|
||||
case "Leviia":
|
||||
// No quirks
|
||||
useAlreadyExists = false // untested
|
||||
case "Qiniu":
|
||||
useMultipartEtag = false
|
||||
urlEncodeListings = false
|
||||
virtualHostStyle = false
|
||||
useAlreadyExists = false // untested
|
||||
case "GCS":
|
||||
// Google break request Signature by mutating accept-encoding HTTP header
|
||||
// https://github.com/rclone/rclone/issues/6670
|
||||
useAcceptEncodingGzip = false
|
||||
useAlreadyExists = true // returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
|
||||
default:
|
||||
fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
|
||||
fallthrough
|
||||
case "Other":
|
||||
listObjectsV2 = false
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false
|
||||
default:
|
||||
fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
|
||||
listObjectsV2 = false
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false
|
||||
useAlreadyExists = false
|
||||
}
|
||||
|
||||
// Path Style vs Virtual Host style
|
||||
@@ -3159,6 +3270,22 @@ func setQuirks(opt *Options) {
|
||||
opt.UseAcceptEncodingGzip.Valid = true
|
||||
opt.UseAcceptEncodingGzip.Value = useAcceptEncodingGzip
|
||||
}
|
||||
|
||||
// Has the provider got AlreadyOwnedByYou error?
|
||||
if !opt.UseAlreadyExists.Valid {
|
||||
opt.UseAlreadyExists.Valid = true
|
||||
opt.UseAlreadyExists.Value = useAlreadyExists
|
||||
}
|
||||
|
||||
// Set the correct use multipart uploads if not manually set
|
||||
if !opt.UseMultipartUploads.Valid {
|
||||
opt.UseMultipartUploads.Valid = true
|
||||
opt.UseMultipartUploads.Value = useMultipartUploads
|
||||
}
|
||||
if !opt.UseMultipartUploads.Value {
|
||||
opt.UploadCutoff = math.MaxInt64
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
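setQuirks above only fills in a provider default when the corresponding fs.Tristate option has not been set explicitly (Valid is false). A small sketch of that defaulting pattern, with a hypothetical helper name:

package sketch

import "github.com/rclone/rclone/fs"

// applyQuirkDefault fills in a provider default for a tristate option that
// the user has not set, mirroring the UseAlreadyExists/UseMultipartUploads
// handling above.
func applyQuirkDefault(opt *fs.Tristate, providerDefault bool) {
	if !opt.Valid { // not set by the user or the config
		opt.Valid = true
		opt.Value = providerDefault
	}
}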
// setRoot changes the root of the Fs
|
||||
@@ -3271,6 +3398,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.features.CanHaveEmptyDirectories = true
|
||||
}
|
||||
// f.listMultipartUploads()
|
||||
if !opt.UseMultipartUploads.Value {
|
||||
fs.Debugf(f, "Disabling multipart uploads")
|
||||
f.features.OpenChunkWriter = nil
|
||||
}
|
||||
|
||||
if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
|
||||
// Check to see if the (bucket,directory) is actually an existing file
|
||||
@@ -3315,6 +3446,7 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s
|
||||
withVersions: f.opt.Versions,
|
||||
findFile: true,
|
||||
versionAt: f.opt.VersionAt,
|
||||
hidden: f.opt.VersionDeleted,
|
||||
}, func(gotRemote string, object *s3.Object, objectVersionID *string, isDirectory bool) error {
|
||||
if isDirectory {
|
||||
return nil
|
||||
@@ -3376,6 +3508,10 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
|
||||
o.bytes = aws.Int64Value(info.Size)
|
||||
o.storageClass = stringClonePointer(info.StorageClass)
|
||||
o.versionID = stringClonePointer(versionID)
|
||||
// If is delete marker, show that metadata has been read as there is none to read
|
||||
if info.Size == isDeleteMarker {
|
||||
o.meta = map[string]string{}
|
||||
}
|
||||
} else if !o.fs.opt.NoHeadObject {
|
||||
err := o.readMetaData(ctx) // reads info and meta, returning an error
|
||||
if err != nil {
|
||||
@@ -3626,6 +3762,9 @@ func (ls *versionsList) List(ctx context.Context) (resp *s3.ListObjectsV2Output,
|
||||
// Set up the request for next time
|
||||
ls.req.KeyMarker = respVersions.NextKeyMarker
|
||||
ls.req.VersionIdMarker = respVersions.NextVersionIdMarker
|
||||
if aws.BoolValue(respVersions.IsTruncated) && ls.req.KeyMarker == nil {
|
||||
return nil, nil, errors.New("s3 protocol error: received versions listing with IsTruncated set with no NextKeyMarker")
|
||||
}
|
||||
|
||||
// If we are URL encoding then must decode the marker
|
||||
if ls.req.KeyMarker != nil && ls.req.EncodingType != nil {
|
||||
@@ -3670,7 +3809,7 @@ func (ls *versionsList) List(ctx context.Context) (resp *s3.ListObjectsV2Output,
|
||||
//structs.SetFrom(obj, objVersion)
|
||||
setFrom_s3Object_s3ObjectVersion(obj, objVersion)
|
||||
// Adjust the file names
|
||||
if !ls.usingVersionAt && !aws.BoolValue(objVersion.IsLatest) {
|
||||
if !ls.usingVersionAt && (!aws.BoolValue(objVersion.IsLatest) || objVersion.Size == isDeleteMarker) {
|
||||
if obj.Key != nil && objVersion.LastModified != nil {
|
||||
*obj.Key = version.Add(*obj.Key, *objVersion.LastModified)
|
||||
}
|
||||
@@ -3938,6 +4077,7 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
|
||||
addBucket: addBucket,
|
||||
withVersions: f.opt.Versions,
|
||||
versionAt: f.opt.VersionAt,
|
||||
hidden: f.opt.VersionDeleted,
|
||||
}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(ctx, remote, object, versionID, isDirectory)
|
||||
if err != nil {
|
||||
@@ -4024,6 +4164,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
recurse: true,
|
||||
withVersions: f.opt.Versions,
|
||||
versionAt: f.opt.VersionAt,
|
||||
hidden: f.opt.VersionDeleted,
|
||||
}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(ctx, remote, object, versionID, isDirectory)
|
||||
if err != nil {
|
||||
@@ -4187,8 +4328,17 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
|
||||
fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
|
||||
}
|
||||
if awsErr, ok := err.(awserr.Error); ok {
|
||||
if code := awsErr.Code(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
|
||||
switch awsErr.Code() {
|
||||
case "BucketAlreadyOwnedByYou":
|
||||
err = nil
|
||||
case "BucketAlreadyExists", "BucketNameUnavailable":
|
||||
if f.opt.UseAlreadyExists.Value {
|
||||
// We can trust BucketAlreadyExists to mean not owned by us, so make it non retriable
|
||||
err = fserrors.NoRetryError(err)
|
||||
} else {
|
||||
// We can't trust BucketAlreadyExists to mean not owned by us, so ignore it
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
@@ -4358,10 +4508,20 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
|
||||
|
||||
fs.Debugf(src, "Starting multipart copy with %d parts", numParts)
|
||||
|
||||
var parts []*s3.CompletedPart
|
||||
var (
|
||||
parts = make([]*s3.CompletedPart, numParts)
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
)
|
||||
g.SetLimit(f.opt.UploadConcurrency)
|
||||
for partNum := int64(1); partNum <= numParts; partNum++ {
|
||||
if err := f.pacer.Call(func() (bool, error) {
|
||||
partNum := partNum
|
||||
// Fail fast, in case an errgroup managed function returns an error
|
||||
// gCtx is cancelled. There is no point in uploading all the other parts.
|
||||
if gCtx.Err() != nil {
|
||||
break
|
||||
}
|
||||
partNum := partNum // for closure
|
||||
g.Go(func() error {
|
||||
var uout *s3.UploadPartCopyOutput
|
||||
uploadPartReq := &s3.UploadPartCopyInput{}
|
||||
//structs.SetFrom(uploadPartReq, copyReq)
|
||||
setFrom_s3UploadPartCopyInput_s3CopyObjectInput(uploadPartReq, copyReq)
|
||||
@@ -4370,18 +4530,24 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
|
||||
uploadPartReq.PartNumber = &partNum
|
||||
uploadPartReq.UploadId = uid
|
||||
uploadPartReq.CopySourceRange = aws.String(calculateRange(partSize, partNum-1, numParts, srcSize))
|
||||
uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
uout, err = f.c.UploadPartCopyWithContext(gCtx, uploadPartReq)
|
||||
return f.shouldRetry(gCtx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return f.shouldRetry(ctx, err)
|
||||
return err
|
||||
}
|
||||
parts = append(parts, &s3.CompletedPart{
|
||||
parts[partNum-1] = &s3.CompletedPart{
|
||||
PartNumber: &partNum,
|
||||
ETag: uout.CopyPartResult.ETag,
|
||||
})
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
err = g.Wait()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return f.pacer.Call(func() (bool, error) {
|
||||
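The multipart copy above switches from a sequential loop to an errgroup bounded by SetLimit, with each part writing into its own slot of a pre-sized slice so no mutex is needed, and a fail-fast check on the group context. A self-contained sketch of the same pattern, with a stand-in for UploadPartCopy:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	const numParts = 8
	g, gCtx := errgroup.WithContext(context.Background())
	g.SetLimit(4) // at most 4 parts in flight, like --s3-upload-concurrency

	results := make([]string, numParts)
	for partNum := 1; partNum <= numParts; partNum++ {
		if gCtx.Err() != nil {
			break // another part already failed - don't start more work
		}
		partNum := partNum // capture for the closure
		g.Go(func() error {
			// stand-in for UploadPartCopy; each goroutine writes only its own slot
			results[partNum-1] = fmt.Sprintf("etag-%d", partNum)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println(results)
}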
@@ -4781,6 +4947,7 @@ func (f *Fs) restoreStatus(ctx context.Context, all bool) (out []restoreStatusOu
|
||||
recurse: true,
|
||||
withVersions: f.opt.Versions,
|
||||
versionAt: f.opt.VersionAt,
|
||||
hidden: f.opt.VersionDeleted,
|
||||
restoreStatus: true,
|
||||
}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(ctx, remote, object, versionID, isDirectory)
|
||||
@@ -5561,6 +5728,13 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||
var mOut *s3.CreateMultipartUploadOutput
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
mOut, err = f.c.CreateMultipartUploadWithContext(ctx, &mReq)
|
||||
if err == nil {
|
||||
if mOut == nil {
|
||||
err = fserrors.RetryErrorf("internal error: no info from multipart upload")
|
||||
} else if mOut.UploadId == nil {
|
||||
err = fserrors.RetryErrorf("internal error: no UploadId in multpart upload: %#v", *mOut)
|
||||
}
|
||||
}
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
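The hunk above wraps CreateMultipartUpload in the usual pacer retry loop and converts a missing UploadId into a retryable error. A hedged sketch of that idiom using rclone's fs and fserrors packages (callWithRetry and doRequest are stand-ins, not the real code):

package sketch

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
)

// callWithRetry runs doRequest under the pacer and treats an empty ID in an
// otherwise successful response as a transient error worth retrying.
func callWithRetry(ctx context.Context, pacer *fs.Pacer, doRequest func() (string, error)) (id string, err error) {
	err = pacer.Call(func() (bool, error) {
		id, err = doRequest()
		if err == nil && id == "" {
			err = fserrors.RetryErrorf("internal error: empty ID in response")
		}
		return fserrors.ShouldRetry(err), err
	})
	return id, err
}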
@@ -5904,7 +6078,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
}
|
||||
|
||||
// Fetch metadata if --metadata is in use
|
||||
meta, err := fs.GetMetadataOptions(ctx, src, options)
|
||||
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
|
||||
if err != nil {
|
||||
return ui, fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
}
|
||||
@@ -6070,7 +6244,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
var err error
|
||||
var ui uploadInfo
|
||||
if multipart {
|
||||
wantETag, gotETag, versionID, ui, err = o.uploadMultipart(ctx, src, in)
|
||||
wantETag, gotETag, versionID, ui, err = o.uploadMultipart(ctx, src, in, options...)
|
||||
} else {
|
||||
ui, err = o.prepareUpload(ctx, src, options)
|
||||
if err != nil {
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
@@ -393,6 +394,41 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Mkdir", func(t *testing.T) {
|
||||
// Test what happens when we create a bucket we already own and see whether the
|
||||
// quirk is set correctly
|
||||
req := s3.CreateBucketInput{
|
||||
Bucket: &f.rootBucket,
|
||||
ACL: stringPointerOrNil(f.opt.BucketACL),
|
||||
}
|
||||
if f.opt.LocationConstraint != "" {
|
||||
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
|
||||
LocationConstraint: &f.opt.LocationConstraint,
|
||||
}
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
_, err := f.c.CreateBucketWithContext(ctx, &req)
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
var errString string
|
||||
if err == nil {
|
||||
errString = "No Error"
|
||||
} else if awsErr, ok := err.(awserr.Error); ok {
|
||||
errString = awsErr.Code()
|
||||
} else {
|
||||
assert.Fail(t, "Unknown error %T %v", err, err)
|
||||
}
|
||||
t.Logf("Creating a bucket we already have created returned code: %s", errString)
|
||||
switch errString {
|
||||
case "BucketAlreadyExists":
|
||||
assert.False(t, f.opt.UseAlreadyExists.Value, "Need to clear UseAlreadyExists quirk")
|
||||
case "No Error", "BucketAlreadyOwnedByYou":
|
||||
assert.True(t, f.opt.UseAlreadyExists.Value, "Need to set UseAlreadyExists quirk")
|
||||
default:
|
||||
assert.Fail(t, "Unknown error string %q", errString)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Cleanup", func(t *testing.T) {
|
||||
require.NoError(t, f.CleanUpHidden(ctx))
|
||||
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
|
||||
|
||||
@@ -2,6 +2,9 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -9,6 +12,13 @@ import (
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) {
|
||||
ctx, opt := context.Background(), new(Options)
|
||||
opt.Provider = "AWS"
|
||||
client := getClient(ctx, opt)
|
||||
return ctx, opt, client
|
||||
}
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
@@ -39,6 +49,28 @@ func TestIntegration2(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAWSDualStackOption(t *testing.T) {
|
||||
{
|
||||
// test enabled
|
||||
ctx, opt, client := SetupS3Test(t)
|
||||
opt.UseDualStack = true
|
||||
s3Conn, _, _ := s3Connection(ctx, opt, client)
|
||||
if !strings.Contains(s3Conn.Endpoint, "dualstack") {
|
||||
t.Errorf("dualstack failed got: %s, wanted: dualstack", s3Conn.Endpoint)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
{
|
||||
// test default case
|
||||
ctx, opt, client := SetupS3Test(t)
|
||||
s3Conn, _, _ := s3Connection(ctx, opt, client)
|
||||
if strings.Contains(s3Conn.Endpoint, "dualstack") {
|
||||
t.Errorf("dualstack failed got: %s, NOT wanted: dualstack", s3Conn.Endpoint)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
@@ -47,4 +79,12 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setCopyCutoff(cs)
|
||||
}
|
||||
|
||||
var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||
_ fstests.SetCopyCutoffer = (*Fs)(nil)
|
||||
)
|
||||
|
||||
@@ -449,6 +449,26 @@ Example:
|
||||
myUser:myPass@localhost:9005
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "copy_is_hardlink",
|
||||
Default: false,
|
||||
Help: `Set to enable server side copies using hardlinks.
|
||||
|
||||
The SFTP protocol does not define a copy command so normally server
|
||||
side copies are not allowed with the sftp backend.
|
||||
|
||||
However the SFTP protocol does support hardlinking, and if you enable
|
||||
this flag then the sftp backend will support server side copies. These
|
||||
will be implemented by doing a hardlink from the source to the
|
||||
destination.
|
||||
|
||||
Not all sftp servers support this.
|
||||
|
||||
Note that hardlinking two files together will use no additional space
|
||||
as the source and the destination will be the same file.
|
||||
|
||||
This feature may be useful backups made with --copy-dest.`,
|
||||
Advanced: true,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
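The copy_is_hardlink help above, and the Copy implementation added further down, boil down to a single Link call on the SFTP client. A minimal sketch, assuming the github.com/pkg/sftp client and placeholder paths:

package sketch

import (
	"fmt"

	"github.com/pkg/sftp"
)

// hardlinkCopy creates dst as a hardlink of src on the remote server,
// so no file data crosses the network and no extra space is used.
func hardlinkCopy(client *sftp.Client, src, dst string) error {
	if err := client.Link(src, dst); err != nil {
		return fmt.Errorf("hardlink copy failed: %w", err)
	}
	return nil
}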
@@ -490,6 +510,7 @@ type Options struct {
|
||||
HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
|
||||
SSH fs.SpaceSepList `config:"ssh"`
|
||||
SocksProxy string `config:"socks_proxy"`
|
||||
CopyIsHardlink bool `config:"copy_is_hardlink"`
|
||||
}
|
||||
|
||||
// Fs stores the interface to the remote SFTP files
|
||||
@@ -1049,6 +1070,10 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
|
||||
SlowHash: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
if !opt.CopyIsHardlink {
|
||||
// Disable server side copy unless --sftp-copy-is-hardlink is set
|
||||
f.features.Copy = nil
|
||||
}
|
||||
// Make a connection and pool it to return errors early
|
||||
c, err := f.getSftpConnection(ctx)
|
||||
if err != nil {
|
||||
@@ -1401,6 +1426,43 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// Copy server side copies a remote sftp file object using hardlinks
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
if !f.opt.CopyIsHardlink {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
err := f.mkParentDir(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy mkParentDir failed: %w", err)
|
||||
}
|
||||
c, err := f.getSftpConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy: %w", err)
|
||||
}
|
||||
srcPath, dstPath := srcObj.path(), path.Join(f.absRoot, remote)
|
||||
err = c.sftpClient.Link(srcPath, dstPath)
|
||||
f.putSftpConnection(&c, err)
|
||||
if err != nil {
|
||||
if sftpErr, ok := err.(*sftp.StatusError); ok {
|
||||
if sftpErr.FxCode() == sftp.ErrSSHFxOpUnsupported {
|
||||
// Remote doesn't support Link
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Copy failed: %w", err)
|
||||
}
|
||||
dstObj, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy NewObject failed: %w", err)
|
||||
}
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
@@ -2120,6 +2182,7 @@ var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Mover = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.DirMover = &Fs{}
|
||||
_ fs.Abouter = &Fs{}
|
||||
_ fs.Shutdowner = &Fs{}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"io"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
@@ -93,8 +94,7 @@ func (f *Fs) newSSHSessionExternal() *sshSessionExternal {
|
||||
s.cmd = exec.CommandContext(ctx, ssh[0], ssh[1:]...)
|
||||
|
||||
// Allow the command a short time only to shut down
|
||||
// FIXME enable when we get rid of go1.19
|
||||
// s.cmd.WaitDelay = time.Second
|
||||
s.cmd.WaitDelay = time.Second
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
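The change above starts setting exec.Cmd.WaitDelay now that Go 1.20 is the minimum supported version. A small sketch of what that field does (the command itself is a placeholder):

package main

import (
	"context"
	"os/exec"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, "ssh", "example.com", "true") // placeholder command
	// After the context is cancelled, give the child one second to exit
	// cleanly before it is forcibly killed.
	cmd.WaitDelay = time.Second

	_ = cmd.Run()
}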
@@ -1176,6 +1176,12 @@ func (f *Fs) DirCacheFlush() {
|
||||
f.dirCache.ResetRoot()
|
||||
}
|
||||
|
||||
// Shutdown shutdown the fs
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
f.tokenRenewer.Shutdown()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.MD5)
|
||||
@@ -1466,6 +1472,7 @@ var (
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
// _ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
smb2 "github.com/hirochachacha/go-smb2"
|
||||
smb2 "github.com/cloudsoda/go-smb2"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
@@ -40,7 +40,7 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
|
||||
},
|
||||
}
|
||||
|
||||
session, err := d.DialContext(ctx, tconn)
|
||||
session, err := d.DialConn(ctx, tconn, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
smb2 "github.com/hirochachacha/go-smb2"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
@@ -178,6 +177,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
CaseInsensitive: opt.CaseInsensitive,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
|
||||
@@ -477,26 +477,6 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Wrap a smb2.File with a custom Close method
|
||||
type closeSession struct {
|
||||
*smb2.File
|
||||
close func() error
|
||||
closed bool
|
||||
}
|
||||
|
||||
// Close the handle and call the custom code
|
||||
func (c *closeSession) Close() error {
|
||||
err := c.File.Close()
|
||||
if !c.closed {
|
||||
err2 := c.close()
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
c.closed = true
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// OpenWriterAt opens with a handle for random access writes
|
||||
//
|
||||
// Pass in the remote desired and the size if known.
|
||||
@@ -530,19 +510,10 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
|
||||
|
||||
fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
|
||||
if err != nil {
|
||||
o.fs.putConnection(&cn)
|
||||
return nil, fmt.Errorf("failed to open: %w", err)
|
||||
}
|
||||
|
||||
// Connection is returned in the closeSession.Close method
|
||||
c := &closeSession{
|
||||
File: fl,
|
||||
close: func() error {
|
||||
o.fs.putConnection(&cn)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
return c, nil
|
||||
return fl, nil
|
||||
}
|
||||
|
||||
// Shutdown the backend, closing any background tasks and any
|
||||
|
||||
@@ -877,6 +877,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
opt: *opt,
|
||||
upstreams: usedUpstreams,
|
||||
}
|
||||
// Correct root if definitely pointing to a file
|
||||
if fserr == fs.ErrorIsFile {
|
||||
f.root = path.Dir(f.root)
|
||||
if f.root == "." || f.root == "/" {
|
||||
f.root = ""
|
||||
}
|
||||
}
|
||||
err = upstream.Prepare(f.upstreams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -121,9 +121,8 @@ func (p *Prop) Hashes() (hashes map[hash.Type]string) {
|
||||
hashes = make(map[hash.Type]string)
|
||||
hashes[hash.SHA1] = *p.MESha1Hex
|
||||
return hashes
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PropValue is a tagged name and value
|
||||
|
||||
@@ -91,6 +91,9 @@ func init() {
|
||||
}, {
|
||||
Value: "sharepoint-ntlm",
|
||||
Help: "Sharepoint with NTLM authentication, usually self-hosted or on-premises",
|
||||
}, {
|
||||
Value: "rclone",
|
||||
Help: "rclone WebDAV server to serve a remote over HTTP via the WebDAV protocol",
|
||||
}, {
|
||||
Value: "other",
|
||||
Help: "Other site/service or software",
|
||||
@@ -644,6 +647,10 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
|
||||
// so we must perform an extra check to detect this
|
||||
// condition and return a proper error code.
|
||||
f.checkBeforePurge = true
|
||||
case "rclone":
|
||||
f.canStream = true
|
||||
f.precision = time.Second
|
||||
f.useOCMtime = true
|
||||
case "other":
|
||||
default:
|
||||
fs.Debugf(f, "Unknown vendor %q", vendor)
|
||||
|
||||
@@ -7,3 +7,7 @@
|
||||
<ankur0493@gmail.com>
|
||||
<agupta@egnyte.com>
|
||||
<ricci@disroot.org>
|
||||
<stoesser@yay-digital.de>
|
||||
<services+github@simjo.st>
|
||||
<seb•ɑƬ•chezwam•ɖɵʈ•org>
|
||||
<allllaboutyou@gmail.com>
|
||||
@@ -6,7 +6,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
@@ -21,23 +20,21 @@ import (
|
||||
"sync"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-semver/semver"
|
||||
)
|
||||
|
||||
var (
|
||||
// Flags
|
||||
debug = flag.Bool("d", false, "Print commands instead of running them.")
|
||||
parallel = flag.Int("parallel", runtime.NumCPU(), "Number of commands to run in parallel.")
|
||||
debug = flag.Bool("d", false, "Print commands instead of running them")
|
||||
parallel = flag.Int("parallel", runtime.NumCPU(), "Number of commands to run in parallel")
|
||||
copyAs = flag.String("release", "", "Make copies of the releases with this name")
|
||||
gitLog = flag.String("git-log", "", "git log to include as well")
|
||||
include = flag.String("include", "^.*$", "os/arch regexp to include")
|
||||
exclude = flag.String("exclude", "^$", "os/arch regexp to exclude")
|
||||
cgo = flag.Bool("cgo", false, "Use cgo for the build")
|
||||
noClean = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
|
||||
noClean = flag.Bool("no-clean", false, "Don't clean the build directory before running")
|
||||
tags = flag.String("tags", "", "Space separated list of build tags")
|
||||
buildmode = flag.String("buildmode", "", "Passed to go build -buildmode flag")
|
||||
compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip.")
|
||||
compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip")
|
||||
extraEnv = flag.String("env", "", "comma separated list of VAR=VALUE env vars to set")
|
||||
macOSSDK = flag.String("macos-sdk", "", "macOS SDK to use")
|
||||
macOSArch = flag.String("macos-arch", "", "macOS arch to use")
|
||||
@@ -140,21 +137,21 @@ func chdir(dir string) {
|
||||
func substitute(inFile, outFile string, data interface{}) {
|
||||
t, err := template.ParseFiles(inFile)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to read template file %q: %v %v", inFile, err)
|
||||
log.Fatalf("Failed to read template file %q: %v", inFile, err)
|
||||
}
|
||||
out, err := os.Create(outFile)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create output file %q: %v %v", outFile, err)
|
||||
log.Fatalf("Failed to create output file %q: %v", outFile, err)
|
||||
}
|
||||
defer func() {
|
||||
err := out.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to close output file %q: %v %v", outFile, err)
|
||||
log.Fatalf("Failed to close output file %q: %v", outFile, err)
|
||||
}
|
||||
}()
|
||||
err = t.Execute(out, data)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to substitute template file %q: %v %v", inFile, err)
|
||||
log.Fatalf("Failed to substitute template file %q: %v", inFile, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -202,101 +199,6 @@ func buildDebAndRpm(dir, version, goarch string) []string {
|
||||
return artifacts
|
||||
}
|
||||
|
||||
// generate system object (syso) file to be picked up by a following go build for embedding icon and version info resources into windows executable
|
||||
func buildWindowsResourceSyso(goarch string, versionTag string) string {
|
||||
type M map[string]interface{}
|
||||
version := strings.TrimPrefix(versionTag, "v")
|
||||
semanticVersion := semver.New(version)
|
||||
|
||||
// Build json input to goversioninfo utility
|
||||
bs, err := json.Marshal(M{
|
||||
"FixedFileInfo": M{
|
||||
"FileVersion": M{
|
||||
"Major": semanticVersion.Major,
|
||||
"Minor": semanticVersion.Minor,
|
||||
"Patch": semanticVersion.Patch,
|
||||
},
|
||||
"ProductVersion": M{
|
||||
"Major": semanticVersion.Major,
|
||||
"Minor": semanticVersion.Minor,
|
||||
"Patch": semanticVersion.Patch,
|
||||
},
|
||||
},
|
||||
"StringFileInfo": M{
|
||||
"CompanyName": "https://rclone.org",
|
||||
"ProductName": "Rclone",
|
||||
"FileDescription": "Rclone",
|
||||
"InternalName": "rclone",
|
||||
"OriginalFilename": "rclone.exe",
|
||||
"LegalCopyright": "The Rclone Authors",
|
||||
"FileVersion": version,
|
||||
"ProductVersion": version,
|
||||
},
|
||||
"IconPath": "../graphics/logo/ico/logo_symbol_color.ico",
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("Failed to build version info json: %v", err)
|
||||
return ""
|
||||
}
|
||||
|
||||
// Write json to temporary file that will only be used by the goversioninfo command executed below.
|
||||
jsonPath, err := filepath.Abs("versioninfo_windows_" + goarch + ".json") // Appending goos and goarch as suffix to avoid any race conditions
|
||||
if err != nil {
|
||||
log.Printf("Failed to resolve path: %v", err)
|
||||
return ""
|
||||
}
|
||||
err = os.WriteFile(jsonPath, bs, 0644)
|
||||
if err != nil {
|
||||
log.Printf("Failed to write %s: %v", jsonPath, err)
|
||||
return ""
|
||||
}
|
||||
defer func() {
|
||||
if err := os.Remove(jsonPath); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
log.Printf("Warning: Couldn't remove generated %s: %v. Please remove it manually.", jsonPath, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Execute goversioninfo utility using the json file as input.
|
||||
// It will produce a system object (syso) file that a following go build should pick up.
|
||||
sysoPath, err := filepath.Abs("../resource_windows_" + goarch + ".syso") // Appending goos and goarch as suffix to avoid any race conditions, and also it is recognized by go build and avoids any builds for other systems considering it
|
||||
if err != nil {
|
||||
log.Printf("Failed to resolve path: %v", err)
|
||||
return ""
|
||||
}
|
||||
args := []string{
|
||||
"goversioninfo",
|
||||
"-o",
|
||||
sysoPath,
|
||||
}
|
||||
if strings.Contains(goarch, "64") {
|
||||
args = append(args, "-64") // Make the syso a 64-bit coff file
|
||||
}
|
||||
if strings.Contains(goarch, "arm") {
|
||||
args = append(args, "-arm") // Make the syso an arm binary
|
||||
}
|
||||
args = append(args, jsonPath)
|
||||
err = runEnv(args, nil)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return sysoPath
|
||||
}
|
||||
|
||||
// delete generated system object (syso) resource file
|
||||
func cleanupResourceSyso(sysoFilePath string) {
|
||||
if sysoFilePath == "" {
|
||||
return
|
||||
}
|
||||
if err := os.Remove(sysoFilePath); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
log.Printf("Warning: Couldn't remove generated %s: %v. Please remove it manually.", sysoFilePath, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Trip a version suffix off the arch if present
|
||||
func stripVersion(goarch string) string {
|
||||
i := strings.Index(goarch, "-")
|
||||
@@ -315,17 +217,41 @@ func runOut(command ...string) string {
|
||||
return strings.TrimSpace(string(out))
|
||||
}
|
||||
|
||||
// Generate Windows resource system object file (.syso), which can be picked
|
||||
// up by the following go build for embedding version information and icon
|
||||
// resources into the executable.
|
||||
func generateResourceWindows(version, arch string) func() {
|
||||
sysoPath := fmt.Sprintf("../resource_windows_%s.syso", arch) // Use explicit destination filename, even though it should be same as default, so that we are sure we have the correct reference to it
|
||||
if err := os.Remove(sysoPath); !os.IsNotExist(err) {
|
||||
// Note: This one we choose to treat as fatal, to avoid any risk of picking up an old .syso file without noticing.
|
||||
log.Fatalf("Failed to remove existing Windows %s resource system object file %s: %v", arch, sysoPath, err)
|
||||
}
|
||||
args := []string{"go", "run", "../bin/resource_windows.go", "-arch", arch, "-version", version, "-syso", sysoPath}
|
||||
if err := runEnv(args, nil); err != nil {
|
||||
log.Printf("Warning: Couldn't generate Windows %s resource system object file, binaries will not have version information or icon embedded", arch)
|
||||
return nil
|
||||
}
|
||||
if _, err := os.Stat(sysoPath); err != nil {
|
||||
log.Printf("Warning: Couldn't find generated Windows %s resource system object file, binaries will not have version information or icon embedded", arch)
|
||||
return nil
|
||||
}
|
||||
return func() {
|
||||
if err := os.Remove(sysoPath); err != nil && !os.IsNotExist(err) {
|
||||
log.Printf("Warning: Couldn't remove generated Windows %s resource system object file %s: %v. Please remove it manually.", arch, sysoPath, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// build the binary in dir returning success or failure
|
||||
func compileArch(version, goos, goarch, dir string) bool {
|
||||
log.Printf("Compiling %s/%s into %s", goos, goarch, dir)
|
||||
goarchBase := stripVersion(goarch)
|
||||
output := filepath.Join(dir, "rclone")
|
||||
if goos == "windows" {
|
||||
output += ".exe"
|
||||
sysoPath := buildWindowsResourceSyso(goarch, version)
|
||||
if sysoPath == "" {
|
||||
log.Printf("Warning: Windows binaries will not have file information embedded")
|
||||
if cleanupFn := generateResourceWindows(version, goarchBase); cleanupFn != nil {
|
||||
defer cleanupFn()
|
||||
}
|
||||
defer cleanupResourceSyso(sysoPath)
|
||||
}
|
||||
err := os.MkdirAll(dir, 0777)
|
||||
if err != nil {
|
||||
@@ -348,7 +274,7 @@ func compileArch(version, goos, goarch, dir string) bool {
|
||||
)
|
||||
env := []string{
|
||||
"GOOS=" + goos,
|
||||
"GOARCH=" + stripVersion(goarch),
|
||||
"GOARCH=" + goarchBase,
|
||||
}
|
||||
if *extraEnv != "" {
|
||||
env = append(env, strings.Split(*extraEnv, ",")...)
|
||||
|
||||
@@ -23,8 +23,6 @@ CATEGORY = re.compile(r"(^[\w/ ]+(?:, *[\w/ ]+)*):\s*(.*)$")
|
||||
backends = [ x for x in os.listdir("backend") if x != "all"]
|
||||
|
||||
backend_aliases = {
|
||||
"amazon cloud drive" : "amazonclouddrive",
|
||||
"acd" : "amazonclouddrive",
|
||||
"google cloud storage" : "googlecloudstorage",
|
||||
"gcs" : "googlecloudstorage",
|
||||
"azblob" : "azureblob",
|
||||
@@ -34,7 +32,6 @@ backend_aliases = {
|
||||
}
|
||||
|
||||
backend_titles = {
|
||||
"amazonclouddrive": "Amazon Cloud Drive",
|
||||
"googlecloudstorage": "Google Cloud Storage",
|
||||
"azureblob": "Azure Blob",
|
||||
"ftp": "FTP",
|
||||
|
||||
@@ -30,7 +30,6 @@ docs = [
|
||||
# Keep these alphabetical by full name
|
||||
"fichier.md",
|
||||
"alias.md",
|
||||
"amazonclouddrive.md",
|
||||
"s3.md",
|
||||
"b2.md",
|
||||
"box.md",
|
||||
@@ -50,14 +49,17 @@ docs = [
|
||||
"hdfs.md",
|
||||
"hidrive.md",
|
||||
"http.md",
|
||||
"imagekit.md",
|
||||
"internetarchive.md",
|
||||
"jottacloud.md",
|
||||
"koofr.md",
|
||||
"linkbox.md",
|
||||
"mailru.md",
|
||||
"mega.md",
|
||||
"memory.md",
|
||||
"netstorage.md",
|
||||
"azureblob.md",
|
||||
"azurefiles.md",
|
||||
"onedrive.md",
|
||||
"opendrive.md",
|
||||
"oracleobjectstorage.md",
|
||||
@@ -119,6 +121,7 @@ ignore_docs = [
|
||||
"downloads.md",
|
||||
"privacy.md",
|
||||
"sponsor.md",
|
||||
"amazonclouddrive.md",
|
||||
]
|
||||
|
||||
def read_doc(doc):
|
||||
|
||||
122 bin/resource_windows.go (new file)
@@ -0,0 +1,122 @@
|
||||
// Utility program to generate Rclone-specific Windows resource system object
|
||||
// file (.syso), that can be picked up by a following go build for embedding
|
||||
// version information and icon resources into a rclone binary.
|
||||
//
|
||||
// Run it with "go generate", or "go run" to be able to customize with
|
||||
// command-line flags. Note that this program is intended to be run directly
|
||||
// from its original location in the source tree: Default paths are absolute
|
||||
// within the current source tree, which is convenient because it makes it
|
||||
// oblivious to the working directory, and it gives identical result whether
|
||||
// run by "go generate" or "go run", but it will not make sense if this
|
||||
// program's source is moved out from the source tree.
|
||||
//
|
||||
// Can be used for rclone.exe (default), and other binaries such as
|
||||
// librclone.dll (must be specified with flag -binary).
|
||||
//
|
||||
|
||||
//go:generate go run resource_windows.go
|
||||
//go:build tools
|
||||
// +build tools
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/go-semver/semver"
|
||||
"github.com/josephspurrier/goversioninfo"
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Get path of directory containing the current source file to use for absolute path references within the code tree (as described above)
|
||||
projectDir := ""
|
||||
_, sourceFile, _, ok := runtime.Caller(0)
|
||||
if ok {
|
||||
projectDir = path.Dir(path.Dir(sourceFile)) // Root of the current project working directory
|
||||
}
|
||||
|
||||
// Define flags
|
||||
binary := flag.String("binary", "rclone.exe", `The name of the binary to generate resource for, e.g. "rclone.exe" or "librclone.dll"`)
|
||||
arch := flag.String("arch", runtime.GOARCH, `Architecture of resource file, or the target GOARCH, "386", "amd64", "arm", or "arm64"`)
|
||||
version := flag.String("version", fs.Version, "Version number or tag name")
|
||||
icon := flag.String("icon", path.Join(projectDir, "graphics/logo/ico/logo_symbol_color.ico"), "Path to icon file to embed in an .exe binary")
|
||||
dir := flag.String("dir", projectDir, "Path to output directory where to write the resulting system object file (.syso), with a default name according to -arch (resource_windows_<arch>.syso), only considered if not -syso is specified")
|
||||
syso := flag.String("syso", "", "Path to output resource system object file (.syso) to be created/overwritten, ignores -dir")
|
||||
|
||||
// Parse command-line flags
|
||||
flag.Parse()
|
||||
|
||||
// Handle default value for -file which depends on optional -dir and -arch
|
||||
if *syso == "" {
|
||||
// Use default filename, which includes target GOOS (hardcoded "windows")
|
||||
// and GOARCH (from argument -arch) as suffix, to avoid any race conditions,
|
||||
// and also this will be recognized by go build when it is consuming the
|
||||
// .syso file and will only be used for builds with matching os/arch.
|
||||
*syso = path.Join(*dir, fmt.Sprintf("resource_windows_%s.syso", *arch))
|
||||
}
|
||||
|
||||
// Parse version/tag string argument as a SemVer
|
||||
stringVersion := strings.TrimPrefix(*version, "v")
|
||||
semanticVersion, err := semver.NewVersion(stringVersion)
|
||||
if err != nil {
|
||||
log.Fatalf("Invalid version number: %v", err)
|
||||
}
|
||||
|
||||
// Extract binary extension
|
||||
binaryExt := path.Ext(*binary)
|
||||
|
||||
// Create the version info configuration container
|
||||
vi := &goversioninfo.VersionInfo{}
|
||||
|
||||
// FixedFileInfo
|
||||
vi.FixedFileInfo.FileOS = "040004" // VOS_NT_WINDOWS32
|
||||
if strings.EqualFold(binaryExt, ".exe") {
|
||||
vi.FixedFileInfo.FileType = "01" // VFT_APP
|
||||
} else if strings.EqualFold(binaryExt, ".dll") {
|
||||
vi.FixedFileInfo.FileType = "02" // VFT_DLL
|
||||
} else {
|
||||
log.Fatalf("Specified binary must have extension .exe or .dll")
|
||||
}
|
||||
// FixedFileInfo.FileVersion
|
||||
vi.FixedFileInfo.FileVersion.Major = int(semanticVersion.Major)
|
||||
vi.FixedFileInfo.FileVersion.Minor = int(semanticVersion.Minor)
|
||||
vi.FixedFileInfo.FileVersion.Patch = int(semanticVersion.Patch)
|
||||
vi.FixedFileInfo.FileVersion.Build = 0
|
||||
// FixedFileInfo.ProductVersion
|
||||
vi.FixedFileInfo.ProductVersion.Major = int(semanticVersion.Major)
|
||||
vi.FixedFileInfo.ProductVersion.Minor = int(semanticVersion.Minor)
|
||||
vi.FixedFileInfo.ProductVersion.Patch = int(semanticVersion.Patch)
|
||||
vi.FixedFileInfo.ProductVersion.Build = 0
|
||||
|
||||
// StringFileInfo
|
||||
vi.StringFileInfo.CompanyName = "https://rclone.org"
|
||||
vi.StringFileInfo.ProductName = "Rclone"
|
||||
vi.StringFileInfo.FileDescription = "Rclone"
|
||||
vi.StringFileInfo.InternalName = (*binary)[:len(*binary)-len(binaryExt)]
|
||||
vi.StringFileInfo.OriginalFilename = *binary
|
||||
vi.StringFileInfo.LegalCopyright = "The Rclone Authors"
|
||||
vi.StringFileInfo.FileVersion = stringVersion
|
||||
vi.StringFileInfo.ProductVersion = stringVersion
|
||||
|
||||
// Icon (only relevant for .exe, not .dll)
|
||||
if *icon != "" && strings.EqualFold(binaryExt, ".exe") {
|
||||
vi.IconPath = *icon
|
||||
}
|
||||
|
||||
// Build native structures from the configuration data
|
||||
vi.Build()
|
||||
|
||||
// Write the native structures as binary data to a buffer
|
||||
vi.Walk()
|
||||
|
||||
// Write the binary data buffer to file
|
||||
if err := vi.WriteSyso(*syso, *arch); err != nil {
|
||||
log.Fatalf(`Failed to generate Windows %s resource system object file for %v with path "%v": %v`, *arch, *binary, *syso, err)
|
||||
}
|
||||
}
|
||||
24
bin/test_metadata_mapper.py
Executable file
@@ -0,0 +1,24 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
A demo metadata mapper
|
||||
"""
|
||||
|
||||
import sys
|
||||
import json
|
||||
|
||||
def main():
|
||||
i = json.load(sys.stdin)
|
||||
# Add tag to description
|
||||
metadata = i["Metadata"]
|
||||
if "description" in metadata:
|
||||
metadata["description"] += " [migrated from domain1]"
|
||||
else:
|
||||
metadata["description"] = "[migrated from domain1]"
|
||||
# Modify owner
|
||||
if "owner" in metadata:
|
||||
metadata["owner"] = metadata["owner"].replace("domain1.com", "domain2.com")
|
||||
o = { "Metadata": metadata }
|
||||
json.dump(o, sys.stdout, indent="\t")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -27,6 +27,7 @@ def add_email(name, email):
|
||||
subprocess.check_call(["git", "commit", "-m", "Add %s to contributors" % name, AUTHORS])
|
||||
|
||||
def main():
|
||||
# Add emails from authors
|
||||
out = subprocess.check_output(["git", "log", '--reverse', '--format=%an|%ae', "master"])
|
||||
out = out.decode("utf-8")
|
||||
|
||||
@@ -43,5 +44,23 @@ def main():
|
||||
previous.add(email)
|
||||
add_email(name, email)
|
||||
|
||||
# Add emails from Co-authored-by: lines
|
||||
out = subprocess.check_output(["git", "log", '-i', '--grep', 'Co-authored-by:', "master"])
|
||||
out = out.decode("utf-8")
|
||||
co_authored_by = re.compile(r"(?i)Co-authored-by:\s+(.*?)\s+<([^>]+)>$")
|
||||
|
||||
for line in out.split("\n"):
|
||||
line = line.strip()
|
||||
m = co_authored_by.search(line)
|
||||
if not m:
|
||||
continue
|
||||
name, email = m.group(1), m.group(2)
|
||||
name = name.strip()
|
||||
email = email.strip()
|
||||
if email in previous:
|
||||
continue
|
||||
previous.add(email)
|
||||
add_email(name, email)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
@@ -40,6 +40,7 @@ import (
|
||||
_ "github.com/rclone/rclone/cmd/move"
|
||||
_ "github.com/rclone/rclone/cmd/moveto"
|
||||
_ "github.com/rclone/rclone/cmd/ncdu"
|
||||
_ "github.com/rclone/rclone/cmd/nfsmount"
|
||||
_ "github.com/rclone/rclone/cmd/obscure"
|
||||
_ "github.com/rclone/rclone/cmd/purge"
|
||||
_ "github.com/rclone/rclone/cmd/rc"
|
||||
|
||||
@@ -2,16 +2,19 @@
|
||||
package bilib
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
)
|
||||
|
||||
// FsPath converts Fs to a suitable rclone argument
|
||||
func FsPath(f fs.Fs) string {
|
||||
func FsPath(f fs.Info) string {
|
||||
name, path, slash := f.Name(), f.Root(), "/"
|
||||
if name == "local" {
|
||||
slash = string(os.PathSeparator)
|
||||
@@ -38,5 +41,57 @@ var nonCanonicalChars = regexp.MustCompile(`[\s\\/:?*]`)
|
||||
|
||||
// SessionName makes a unique base name for the sync operation
|
||||
func SessionName(fs1, fs2 fs.Fs) string {
|
||||
return CanonicalPath(FsPath(fs1)) + ".." + CanonicalPath(FsPath(fs2))
|
||||
return StripHexString(CanonicalPath(FsPath(fs1))) + ".." + StripHexString(CanonicalPath(FsPath(fs2)))
|
||||
}
|
||||
|
||||
// StripHexString strips the (first) canonical {hexstring} suffix
|
||||
func StripHexString(path string) string {
|
||||
open := strings.IndexRune(path, '{')
|
||||
close := strings.IndexRune(path, '}')
|
||||
if open >= 0 && close > open {
|
||||
return path[:open] + path[close+1:] // (trailing underscore)
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
// HasHexString returns true if path contains at least one canonical {hexstring} suffix
|
||||
func HasHexString(path string) bool {
|
||||
open := strings.IndexRune(path, '{')
|
||||
if open >= 0 && strings.IndexRune(path, '}') > open {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// BasePath joins the workDir with the SessionName, stripping {hexstring} suffix if necessary
|
||||
func BasePath(ctx context.Context, workDir string, fs1, fs2 fs.Fs) string {
|
||||
suffixedSession := CanonicalPath(FsPath(fs1)) + ".." + CanonicalPath(FsPath(fs2))
|
||||
suffixedBasePath := filepath.Join(workDir, suffixedSession)
|
||||
listing1 := suffixedBasePath + ".path1.lst"
|
||||
listing2 := suffixedBasePath + ".path2.lst"
|
||||
|
||||
sessionName := SessionName(fs1, fs2)
|
||||
basePath := filepath.Join(workDir, sessionName)
|
||||
|
||||
// Normalize to non-canonical version for overridden configs
|
||||
// to ensure that backend-specific flags don't change the listing filename.
|
||||
// For backward-compatibility, we first check if we found a listing file with the suffixed version.
|
||||
// If so, we rename it (and overwrite non-suffixed version, if any.)
|
||||
// If not, we carry on with the non-suffixed version.
|
||||
// We should only find a suffixed version if bisync v1.66 or older created it.
|
||||
if HasHexString(suffixedSession) && FileExists(listing1) {
|
||||
fs.Infof(listing1, "renaming to: %s", basePath+".path1.lst")
|
||||
if !operations.SkipDestructive(ctx, listing1, "rename to "+basePath+".path1.lst") {
|
||||
_ = os.Rename(listing1, basePath+".path1.lst")
|
||||
}
|
||||
}
|
||||
if HasHexString(suffixedSession) && FileExists(listing2) {
|
||||
fs.Infof(listing2, "renaming to: %s", basePath+".path2.lst")
|
||||
if !operations.SkipDestructive(ctx, listing1, "rename to "+basePath+".path2.lst") {
|
||||
_ = os.Rename(listing2, basePath+".path2.lst")
|
||||
} else {
|
||||
return suffixedBasePath
|
||||
}
|
||||
}
|
||||
return basePath
|
||||
}
|
||||
|
||||
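For illustration only, a minimal sketch of how the new {hexstring} helpers behave; the remote name and hex suffix below are made up, and the import path assumes the bilib package shown above.

package main

import (
	"fmt"

	"github.com/rclone/rclone/cmd/bisync/bilib"
)

func main() {
	p := "remote{a1b2c3}_path1"                      // made-up canonical path with a hex suffix
	fmt.Println(bilib.HasHexString(p))               // true
	fmt.Println(bilib.StripHexString(p))             // "remote_path1" (suffix removed)
	fmt.Println(bilib.StripHexString("local_path2")) // "local_path2" (unchanged, no suffix)
}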
@@ -39,7 +39,7 @@ func FileExists(file string) bool {
|
||||
return !os.IsNotExist(err)
|
||||
}
|
||||
|
||||
// CopyFileIfExists is like CopyFile but does to fail if source does not exist
|
||||
// CopyFileIfExists is like CopyFile but does not fail if source does not exist
|
||||
func CopyFileIfExists(srcFile, dstFile string) error {
|
||||
if !FileExists(srcFile) {
|
||||
return nil
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Names comprises a set of file names
|
||||
@@ -59,3 +61,105 @@ func SaveList(list []string, path string) error {
|
||||
}
|
||||
return os.WriteFile(path, buf.Bytes(), PermSecure)
|
||||
}
|
||||
|
||||
// AliasMap comprises a pair of names that are not equal but treated as equal for comparison purposes
|
||||
// For example, when normalizing unicode and casing
|
||||
// This helps reduce repeated normalization functions, which really slow things down
|
||||
type AliasMap map[string]string
|
||||
|
||||
// Add adds new pair to the set, in both directions
|
||||
func (am AliasMap) Add(name1, name2 string) {
|
||||
if name1 != name2 {
|
||||
am[name1] = name2
|
||||
am[name2] = name1
|
||||
}
|
||||
}
|
||||
|
||||
// Alias returns the alternate version, if any, else the original.
|
||||
func (am AliasMap) Alias(name1 string) string {
|
||||
// note: we don't need to check normalization settings, because we already did it in March.
|
||||
// the AliasMap will only exist if March paired up two unequal filenames.
|
||||
name2, ok := am[name1]
|
||||
if ok {
|
||||
return name2
|
||||
}
|
||||
return name1
|
||||
}
|
||||
|
||||
// ParseGlobs determines whether a string contains {brackets}
|
||||
// and returns the substring (including both brackets) for replacing
|
||||
// substring is first opening bracket to last closing bracket --
|
||||
// good for {{this}} but not {this}{this}
|
||||
func ParseGlobs(s string) (hasGlobs bool, substring string) {
|
||||
open := strings.Index(s, "{")
|
||||
close := strings.LastIndex(s, "}")
|
||||
if open >= 0 && close > open {
|
||||
return true, s[open : close+1]
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
|
||||
// TrimBrackets converts {{this}} to this
|
||||
func TrimBrackets(s string) string {
|
||||
return strings.Trim(s, "{}")
|
||||
}
|
||||
|
||||
// TimeFormat converts a user-supplied string to a Go time constant, if possible
|
||||
func TimeFormat(timeFormat string) string {
|
||||
switch timeFormat {
|
||||
case "Layout":
|
||||
timeFormat = time.Layout
|
||||
case "ANSIC":
|
||||
timeFormat = time.ANSIC
|
||||
case "UnixDate":
|
||||
timeFormat = time.UnixDate
|
||||
case "RubyDate":
|
||||
timeFormat = time.RubyDate
|
||||
case "RFC822":
|
||||
timeFormat = time.RFC822
|
||||
case "RFC822Z":
|
||||
timeFormat = time.RFC822Z
|
||||
case "RFC850":
|
||||
timeFormat = time.RFC850
|
||||
case "RFC1123":
|
||||
timeFormat = time.RFC1123
|
||||
case "RFC1123Z":
|
||||
timeFormat = time.RFC1123Z
|
||||
case "RFC3339":
|
||||
timeFormat = time.RFC3339
|
||||
case "RFC3339Nano":
|
||||
timeFormat = time.RFC3339Nano
|
||||
case "Kitchen":
|
||||
timeFormat = time.Kitchen
|
||||
case "Stamp":
|
||||
timeFormat = time.Stamp
|
||||
case "StampMilli":
|
||||
timeFormat = time.StampMilli
|
||||
case "StampMicro":
|
||||
timeFormat = time.StampMicro
|
||||
case "StampNano":
|
||||
timeFormat = time.StampNano
|
||||
case "DateTime":
|
||||
// timeFormat = time.DateTime // missing in go1.19
|
||||
timeFormat = "2006-01-02 15:04:05"
|
||||
case "DateOnly":
|
||||
// timeFormat = time.DateOnly // missing in go1.19
|
||||
timeFormat = "2006-01-02"
|
||||
case "TimeOnly":
|
||||
// timeFormat = time.TimeOnly // missing in go1.19
|
||||
timeFormat = "15:04:05"
|
||||
case "MacFriendlyTime", "macfriendlytime", "mac":
|
||||
timeFormat = "2006-01-02 0304PM" // not actually a Go constant -- but useful as macOS filenames can't have colons
|
||||
}
|
||||
return timeFormat
|
||||
}
|
||||
|
||||
// AppyTimeGlobs converts "myfile-{DateOnly}.txt" to "myfile-2006-01-02.txt"
|
||||
func AppyTimeGlobs(s string, t time.Time) string {
|
||||
hasGlobs, substring := ParseGlobs(s)
|
||||
if !hasGlobs {
|
||||
return s
|
||||
}
|
||||
timeString := t.Local().Format(TimeFormat(TrimBrackets(substring)))
|
||||
return strings.ReplaceAll(s, substring, timeString)
|
||||
}
|
||||
|
||||
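A small sketch, assuming these helpers live in the bilib package as above, of what the new time-glob functions do; the filename and date are invented for the example. Note that AppyTimeGlobs formats the time in the local timezone.

package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/cmd/bisync/bilib"
)

func main() {
	t := time.Date(2024, time.March, 15, 10, 30, 0, 0, time.UTC)
	// {DateOnly} is translated by TimeFormat to the layout "2006-01-02"
	fmt.Println(bilib.AppyTimeGlobs("backup-{DateOnly}.txt", t)) // e.g. "backup-2024-03-15.txt" (local time)
	// Without braces the string is returned unchanged
	fmt.Println(bilib.AppyTimeGlobs("backup.txt", t)) // "backup.txt"
}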
@@ -21,6 +21,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync"
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
@@ -29,12 +30,16 @@ import (
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/sync"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
|
||||
"github.com/pmezard/go-difflib/difflib"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -71,6 +76,16 @@ var logReplacements = []string{
|
||||
`^NOTICE: too_many_(requests|write_operations)/\.*: Too many requests or write operations.*$`, dropMe,
|
||||
`^NOTICE: Dropbox root .*?: Forced to upload files to set modification times on this backend.$`, dropMe,
|
||||
`^INFO : .*?: src and dst identical but can't set mod time without deleting and re-uploading$`, dropMe,
|
||||
// ignore crypt info messages
|
||||
`^INFO : .*?: Crypt detected! Using cryptcheck instead of check. \(Use --size-only or --ignore-checksum to disable\)$`, dropMe,
|
||||
// ignore drive info messages
|
||||
`^NOTICE:.*?Files of unknown size \(such as Google Docs\) do not sync reliably with --checksum or --size-only\. Consider using modtime instead \(the default\) or --drive-skip-gdocs.*?$`, dropMe,
|
||||
// ignore differences in backend features
|
||||
`^.*?"HashType1":.*?$`, dropMe,
|
||||
`^.*?"HashType2":.*?$`, dropMe,
|
||||
`^.*?"SlowHashDetected":.*?$`, dropMe,
|
||||
`^.*? for same-side diffs on .*?$`, dropMe,
|
||||
`^.*?Downloading hashes.*?$`, dropMe,
|
||||
}
|
||||
|
||||
// Some dry-run messages differ depending on the particular remote.
|
||||
@@ -96,17 +111,23 @@ var logHoppers = []string{
|
||||
// subdirectories. The order inconsistency initially showed up in the
|
||||
// listings and triggered reordering of log messages, but the actual
|
||||
// files will in fact match.
|
||||
`ERROR : - +Access test failed: Path[12] file not found in Path[12] - .*`,
|
||||
`.* +.....Access test failed: Path[12] file not found in Path[12].*`,
|
||||
|
||||
// Test case `resync` suffered from the order of queued copies.
|
||||
`(?:INFO |NOTICE): - Path2 Resync will copy to Path1 +- .*`,
|
||||
|
||||
// Test case `normalization` can have random order of fix-case files.
|
||||
`(?:INFO |NOTICE): .*: Fixed case by renaming to: .*`,
|
||||
|
||||
// order of files re-checked prior to a conflict rename
|
||||
`ERROR : .*: md5 differ.*`,
|
||||
}
|
||||
|
||||
// Some log lines can contain Windows path separator that must be
|
||||
// converted to "/" in every matching token to match golden logs.
|
||||
var logLinesWithSlash = []string{
|
||||
`\(\d\d\) : (touch-glob|touch-copy|copy-file|copy-as|copy-dir|delete-file) `,
|
||||
`INFO : - Path[12] +Queue copy to Path[12] `,
|
||||
`.*\(\d\d\) :.*(fix-names|touch-glob|touch-copy|copy-file|copy-as|copy-dir|delete-file) `,
|
||||
`INFO : - .*Path[12].* +.*Queue copy to.* Path[12].*`,
|
||||
`INFO : Synching Path1 .*? with Path2 `,
|
||||
`INFO : Validating listings for `,
|
||||
}
|
||||
@@ -165,8 +186,11 @@ type bisyncTest struct {
|
||||
golden bool
|
||||
debug bool
|
||||
stopAt int
|
||||
TestFn bisync.TestFunc
|
||||
}
|
||||
|
||||
var color = bisync.Color
|
||||
|
||||
// TestBisync is a test engine for bisync test cases.
|
||||
func TestBisync(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
@@ -180,6 +204,8 @@ func TestBisync(t *testing.T) {
|
||||
if *argRefreshTimes {
|
||||
ci.RefreshTimes = true
|
||||
}
|
||||
bisync.Colors = true
|
||||
time.Local, _ = time.LoadLocation("America/New_York")
|
||||
|
||||
baseDir, err := os.Getwd()
|
||||
require.NoError(t, err, "get current directory")
|
||||
@@ -234,6 +260,10 @@ func TestBisync(t *testing.T) {
|
||||
testList = nil
|
||||
for _, testCase := range b.listDir(b.dataRoot) {
|
||||
if strings.HasPrefix(testCase, "test_") {
|
||||
// if dir is empty, skip it (can happen due to gitignored files/dirs when checking out branch)
|
||||
if len(b.listDir(filepath.Join(b.dataRoot, testCase))) == 0 {
|
||||
continue
|
||||
}
|
||||
testList = append(testList, testCase)
|
||||
}
|
||||
}
|
||||
@@ -277,6 +307,10 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
|
||||
b.goldenDir = b.ensureDir(b.testDir, "golden", false)
|
||||
b.dataDir = b.ensureDir(b.testDir, "modfiles", true) // optional
|
||||
|
||||
// normalize unicode so tests are runnable on macOS
|
||||
b.sessionName = norm.NFC.String(b.sessionName)
|
||||
b.goldenDir = norm.NFC.String(b.goldenDir)
|
||||
|
||||
// For test stability, jam initial dates to a fixed past date.
|
||||
// Test cases that change files will touch specific files to fixed new dates.
|
||||
initDate := time.Date(2000, time.January, 1, 0, 0, 0, 0, bisync.TZ)
|
||||
@@ -373,16 +407,16 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
|
||||
var passed bool
|
||||
switch errorCount {
|
||||
case 0:
|
||||
msg = fmt.Sprintf("TEST %s PASSED", b.testCase)
|
||||
msg = color(terminal.GreenFg, fmt.Sprintf("TEST %s PASSED", b.testCase))
|
||||
passed = true
|
||||
case -2:
|
||||
msg = fmt.Sprintf("TEST %s SKIPPED", b.testCase)
|
||||
msg = color(terminal.YellowFg, fmt.Sprintf("TEST %s SKIPPED", b.testCase))
|
||||
passed = true
|
||||
case -1:
|
||||
msg = fmt.Sprintf("TEST %s FAILED - WRONG NUMBER OF FILES", b.testCase)
|
||||
msg = color(terminal.RedFg, fmt.Sprintf("TEST %s FAILED - WRONG NUMBER OF FILES", b.testCase))
|
||||
passed = false
|
||||
default:
|
||||
msg = fmt.Sprintf("TEST %s FAILED - %d MISCOMPARED FILES", b.testCase, errorCount)
|
||||
msg = color(terminal.RedFg, fmt.Sprintf("TEST %s FAILED - %d MISCOMPARED FILES", b.testCase, errorCount))
|
||||
buckets := b.fs1.Features().BucketBased || b.fs2.Features().BucketBased
|
||||
passed = false
|
||||
if b.testCase == "rmdirs" && buckets {
|
||||
@@ -449,7 +483,7 @@ func (b *bisyncTest) cleanupCase(ctx context.Context) {
|
||||
func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
var fsrc, fdst fs.Fs
|
||||
accounting.Stats(ctx).ResetErrors()
|
||||
b.logPrintf("%s %s", b.stepStr, line)
|
||||
b.logPrintf("%s %s", color(terminal.CyanFg, b.stepStr), color(terminal.BlueFg, line))
|
||||
|
||||
ci := fs.GetConfig(ctx)
|
||||
ciSave := *ci
|
||||
@@ -461,6 +495,23 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
ci.LogLevel = fs.LogLevelDebug
|
||||
}
|
||||
|
||||
testFunc := func() {
|
||||
src := filepath.Join(b.dataDir, "file7.txt")
|
||||
|
||||
for i := 0; i < 50; i++ {
|
||||
dst := "file" + fmt.Sprint(i) + ".txt"
|
||||
err := b.copyFile(ctx, src, b.path2, dst)
|
||||
if err != nil {
|
||||
fs.Errorf(src, "error copying file: %v", err)
|
||||
}
|
||||
dst = "file" + fmt.Sprint(100-i) + ".txt"
|
||||
err = b.copyFile(ctx, src, b.path1, dst)
|
||||
if err != nil {
|
||||
fs.Errorf(dst, "error copying file: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
args := splitLine(line)
|
||||
switch args[0] {
|
||||
case "test":
|
||||
@@ -477,7 +528,12 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
if fsrc, err = fs.NewFs(ctx, args[1]); err != nil {
|
||||
return err
|
||||
}
|
||||
return purgeChildren(ctx, fsrc, "")
|
||||
err = purgeChildren(ctx, fsrc, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
flushCache(fsrc)
|
||||
return
|
||||
case "delete-file":
|
||||
b.checkArgs(args, 1, 1)
|
||||
dir, file := filepath.Split(args[1])
|
||||
@@ -520,6 +576,16 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
case "copy-as":
|
||||
b.checkArgs(args, 3, 3)
|
||||
return b.copyFile(ctx, args[1], args[2], args[3])
|
||||
case "copy-as-NFC":
|
||||
b.checkArgs(args, 3, 3)
|
||||
ci.NoUnicodeNormalization = true
|
||||
ci.FixCase = true
|
||||
return b.copyFile(ctx, args[1], norm.NFC.String(args[2]), norm.NFC.String(args[3]))
|
||||
case "copy-as-NFD":
|
||||
b.checkArgs(args, 3, 3)
|
||||
ci.NoUnicodeNormalization = true
|
||||
ci.FixCase = true
|
||||
return b.copyFile(ctx, args[1], norm.NFD.String(args[2]), norm.NFD.String(args[3]))
|
||||
case "copy-dir", "sync-dir":
|
||||
b.checkArgs(args, 2, 2)
|
||||
if fsrc, err = cache.Get(ctx, args[1]); err != nil {
|
||||
@@ -537,9 +603,131 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
return err
|
||||
case "list-dirs":
|
||||
b.checkArgs(args, 1, 1)
|
||||
return b.listSubdirs(ctx, args[1])
|
||||
return b.listSubdirs(ctx, args[1], true)
|
||||
case "list-files":
|
||||
b.checkArgs(args, 1, 1)
|
||||
return b.listSubdirs(ctx, args[1], false)
|
||||
case "bisync":
|
||||
ci.NoUnicodeNormalization = false
|
||||
ci.IgnoreCaseSync = false
|
||||
// ci.FixCase = true
|
||||
return b.runBisync(ctx, args[1:])
|
||||
case "test-func":
|
||||
b.TestFn = testFunc
|
||||
return
|
||||
case "fix-names":
|
||||
// in case the local os converted any filenames
|
||||
ci.NoUnicodeNormalization = true
|
||||
ci.FixCase = true
|
||||
ci.IgnoreTimes = true
|
||||
reset := func() {
|
||||
ci.NoUnicodeNormalization = false
|
||||
ci.FixCase = false
|
||||
ci.IgnoreTimes = false
|
||||
}
|
||||
defer reset()
|
||||
b.checkArgs(args, 1, 1)
|
||||
var ok bool
|
||||
var remoteName string
|
||||
var remotePath string
|
||||
remoteName, remotePath, err = fspath.SplitFs(args[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if remoteName == "" {
|
||||
remoteName = "/"
|
||||
}
|
||||
|
||||
fsrc, err = fs.NewFs(ctx, remoteName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// DEBUG
|
||||
fs.Debugf(remotePath, "is NFC: %v", norm.NFC.IsNormalString(remotePath))
|
||||
fs.Debugf(remotePath, "is NFD: %v", norm.NFD.IsNormalString(remotePath))
|
||||
fs.Debugf(remotePath, "is valid UTF8: %v", utf8.ValidString(remotePath))
|
||||
|
||||
// check if it's a dir, try moving it
|
||||
var leaf string
|
||||
_, leaf, err = fspath.Split(remotePath)
|
||||
if err == nil && leaf == "" {
|
||||
remotePath = args[1]
|
||||
fs.Debugf(remotePath, "attempting to fix directory")
|
||||
|
||||
fixDirname := func(old, new string) {
|
||||
if new != old {
|
||||
oldName, err := fs.NewFs(ctx, old)
|
||||
if err != nil {
|
||||
fs.Logf(old, "error getting Fs: %v", err)
|
||||
}
|
||||
fs.Debugf(nil, "Attempting to move %s to %s", oldName.Root(), new)
|
||||
// Create random name to temporarily move dir to
|
||||
tmpDirName := strings.TrimSuffix(new, slash) + "-rclone-move-" + random.String(8)
|
||||
var tmpDirFs fs.Fs
|
||||
tmpDirFs, _ = fs.NewFs(ctx, tmpDirName)
|
||||
err = sync.MoveDir(ctx, tmpDirFs, oldName, true, true)
|
||||
if err != nil {
|
||||
fs.Debugf(oldName, "error attempting to move folder: %v", err)
|
||||
}
|
||||
// now move the temp dir to real name
|
||||
fsrc, _ = fs.NewFs(ctx, new)
|
||||
err = sync.MoveDir(ctx, fsrc, tmpDirFs, true, true)
|
||||
if err != nil {
|
||||
fs.Debugf(tmpDirFs, "error attempting to move folder to %s: %v", fsrc.Root(), err)
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(nil, "old and new are equal. Skipping. %s (%s) %s (%s)", old, stringToHash(old), new, stringToHash(new))
|
||||
}
|
||||
}
|
||||
|
||||
if norm.NFC.String(remotePath) != remotePath && norm.NFD.String(remotePath) != remotePath {
|
||||
fs.Debugf(remotePath, "This is neither fully NFD or NFC -- can't fix reliably!")
|
||||
}
|
||||
fixDirname(norm.NFC.String(remotePath), remotePath)
|
||||
fixDirname(norm.NFD.String(remotePath), remotePath)
|
||||
return
|
||||
}
|
||||
|
||||
// if it's a file
|
||||
fs.Debugf(remotePath, "attempting to fix file -- filename hash: %s", stringToHash(leaf))
|
||||
fixFilename := func(old, new string) {
|
||||
ok, err := fs.FileExists(ctx, fsrc, old)
|
||||
if err != nil {
|
||||
fs.Debugf(remotePath, "error checking if file exists: %v", err)
|
||||
}
|
||||
fs.Debugf(old, "file exists: %v %s", ok, stringToHash(old))
|
||||
fs.Debugf(nil, "FILE old: %s new: %s equal: %v", old, new, old == new)
|
||||
fs.Debugf(nil, "HASH old: %s new: %s equal: %v", stringToHash(old), stringToHash(new), stringToHash(old) == stringToHash(new))
|
||||
if ok && new != old {
|
||||
fs.Debugf(new, "attempting to rename %s to %s", old, new)
|
||||
err = operations.MoveFile(ctx, fsrc, fsrc, new, old)
|
||||
if err != nil {
|
||||
fs.Errorf(new, "error trying to rename %s to %s - %v", old, new, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// look for NFC version
|
||||
fixFilename(norm.NFC.String(remotePath), remotePath)
|
||||
// if it's in a subdir we just moved, the file and directory might have different encodings. Check for that.
|
||||
mixed := strings.TrimSuffix(norm.NFD.String(remotePath), norm.NFD.String(leaf)) + norm.NFC.String(leaf)
|
||||
fixFilename(mixed, remotePath)
|
||||
// Try NFD
|
||||
fixFilename(norm.NFD.String(remotePath), remotePath)
|
||||
// Try mixed in reverse
|
||||
mixed = strings.TrimSuffix(norm.NFC.String(remotePath), norm.NFC.String(leaf)) + norm.NFD.String(leaf)
|
||||
fixFilename(mixed, remotePath)
|
||||
// check if it's right now, error if not
|
||||
ok, err = fs.FileExists(ctx, fsrc, remotePath)
|
||||
if !ok || err != nil {
|
||||
fs.Logf(remotePath, "Can't find expected file %s (was it renamed by the os?) %v", args[1], err)
|
||||
return
|
||||
} else {
|
||||
// include hash of filename to make unicode form differences easier to see in logs
|
||||
fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
|
||||
}
|
||||
return
|
||||
default:
|
||||
return fmt.Errorf("unknown command: %q", args[0])
|
||||
}
|
||||
@@ -581,6 +769,13 @@ func (b *bisyncTest) checkArgs(args []string, min, max int) {
|
||||
}
|
||||
}
|
||||
|
||||
func flushCache(f fs.Fs) {
|
||||
dirCacheFlush := f.Features().DirCacheFlush
|
||||
if dirCacheFlush == nil {
|
||||
fs.Errorf(nil, "%v: can't flush dir cache", f)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *bisyncTest) runBisync(ctx context.Context, args []string) (err error) {
|
||||
opt := &bisync.Options{
|
||||
Workdir: b.workDir,
|
||||
@@ -589,10 +784,15 @@ func (b *bisyncTest) runBisync(ctx context.Context, args []string) (err error) {
|
||||
MaxDelete: bisync.DefaultMaxDelete,
|
||||
CheckFilename: bisync.DefaultCheckFilename,
|
||||
CheckSync: bisync.CheckSyncTrue,
|
||||
TestFn: b.TestFn,
|
||||
}
|
||||
octx, ci := fs.AddConfig(ctx)
|
||||
fs1, fs2 := b.fs1, b.fs2
|
||||
|
||||
// flush cache
|
||||
flushCache(fs1)
|
||||
flushCache(fs2)
|
||||
|
||||
addSubdir := func(path, subdir string) fs.Fs {
|
||||
remote := path + subdir
|
||||
f, err := fs.NewFs(ctx, remote)
|
||||
@@ -633,9 +833,41 @@ func (b *bisyncTest) runBisync(ctx context.Context, args []string) (err error) {
|
||||
require.NoError(b.t, err, "parsing max-delete=%q", val)
|
||||
case "size-only":
|
||||
ci.SizeOnly = true
|
||||
case "ignore-size":
|
||||
ci.IgnoreSize = true
|
||||
case "checksum":
|
||||
ci.CheckSum = true
|
||||
opt.Compare.DownloadHash = true // allows us to test crypt and the like
|
||||
case "compare-all":
|
||||
opt.CompareFlag = "size,modtime,checksum"
|
||||
opt.Compare.DownloadHash = true // allows us to test crypt and the like
|
||||
case "subdir":
|
||||
fs1 = addSubdir(b.path1, val)
|
||||
fs2 = addSubdir(b.path2, val)
|
||||
case "backupdir1":
|
||||
opt.BackupDir1 = val
|
||||
case "backupdir2":
|
||||
opt.BackupDir2 = val
|
||||
case "ignore-listing-checksum":
|
||||
opt.IgnoreListingChecksum = true
|
||||
case "no-norm":
|
||||
ci.NoUnicodeNormalization = true
|
||||
ci.IgnoreCaseSync = false
|
||||
case "norm":
|
||||
ci.NoUnicodeNormalization = false
|
||||
ci.IgnoreCaseSync = true
|
||||
case "fix-case":
|
||||
ci.NoUnicodeNormalization = false
|
||||
ci.IgnoreCaseSync = true
|
||||
ci.FixCase = true
|
||||
case "conflict-resolve":
|
||||
_ = opt.ConflictResolve.Set(val)
|
||||
case "conflict-loser":
|
||||
_ = opt.ConflictLoser.Set(val)
|
||||
case "conflict-suffix":
|
||||
opt.ConflictSuffixFlag = val
|
||||
case "resync-mode":
|
||||
_ = opt.ResyncMode.Set(val)
|
||||
default:
|
||||
return fmt.Errorf("invalid bisync option %q", arg)
|
||||
}
|
||||
@@ -689,7 +921,7 @@ func (b *bisyncTest) copyFile(ctx context.Context, src, dst, asName string) (err
|
||||
var fsrc, fdst fs.Fs
|
||||
var srcPath, srcFile, dstPath, dstFile string
|
||||
|
||||
switch fsrc, err = cache.Get(ctx, src); err {
|
||||
switch fsrc, err = fs.NewFs(ctx, src); err {
|
||||
case fs.ErrorIsFile:
|
||||
// ok
|
||||
case nil:
|
||||
@@ -712,7 +944,7 @@ func (b *bisyncTest) copyFile(ctx context.Context, src, dst, asName string) (err
|
||||
if dstFile != "" {
|
||||
dstPath = dst // force directory
|
||||
}
|
||||
if fdst, err = cache.Get(ctx, dstPath); err != nil {
|
||||
if fdst, err = fs.NewFs(ctx, dstPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -729,23 +961,27 @@ func (b *bisyncTest) copyFile(ctx context.Context, src, dst, asName string) (err
|
||||
return operations.CopyFile(fctx, fdst, fsrc, dstFile, srcFile)
|
||||
}
|
||||
|
||||
// listSubdirs is equivalent to `rclone lsf -R --dirs-only`
|
||||
func (b *bisyncTest) listSubdirs(ctx context.Context, remote string) error {
|
||||
// listSubdirs is equivalent to `rclone lsf -R [--dirs-only]`
|
||||
func (b *bisyncTest) listSubdirs(ctx context.Context, remote string, DirsOnly bool) error {
|
||||
f, err := fs.NewFs(ctx, remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// flush cache
|
||||
flushCache(f)
|
||||
|
||||
opt := operations.ListJSONOpt{
|
||||
NoModTime: true,
|
||||
NoMimeType: true,
|
||||
DirsOnly: true,
|
||||
DirsOnly: DirsOnly,
|
||||
Recurse: true,
|
||||
}
|
||||
fmt := operations.ListFormat{}
|
||||
fmt.SetDirSlash(true)
|
||||
fmt.AddPath()
|
||||
printItem := func(item *operations.ListJSONItem) error {
|
||||
b.logPrintf("%s", fmt.Format(item))
|
||||
b.logPrintf("%s - filename hash: %s", fmt.Format(item), stringToHash(item.Name))
|
||||
return nil
|
||||
}
|
||||
return operations.ListJSON(ctx, f, "", &opt, printItem)
|
||||
@@ -873,7 +1109,7 @@ func (b *bisyncTest) compareResults() int {
|
||||
|
||||
if goldenNum != resultNum {
|
||||
log.Print(divider)
|
||||
log.Printf("MISCOMPARE - Number of Golden and Results files do not match:")
|
||||
log.Print(color(terminal.RedFg, "MISCOMPARE - Number of Golden and Results files do not match:"))
|
||||
log.Printf(" Golden count: %d", goldenNum)
|
||||
log.Printf(" Result count: %d", resultNum)
|
||||
log.Printf(" Golden files: %s", strings.Join(goldenFiles, ", "))
|
||||
@@ -909,7 +1145,7 @@ func (b *bisyncTest) compareResults() int {
|
||||
require.NoError(b.t, os.WriteFile(resultFile, []byte(resultText), bilib.PermSecure))
|
||||
}
|
||||
|
||||
if goldenText == resultText {
|
||||
if goldenText == resultText || strings.Contains(resultText, ".DS_Store") {
|
||||
continue
|
||||
}
|
||||
errorCount++
|
||||
@@ -923,7 +1159,7 @@ func (b *bisyncTest) compareResults() int {
|
||||
require.NoError(b.t, err, "diff failed")
|
||||
|
||||
log.Print(divider)
|
||||
log.Printf("| MISCOMPARE -Golden vs +Results for %s", file)
|
||||
log.Printf(color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
|
||||
for _, line := range strings.Split(strings.TrimSpace(text), "\n") {
|
||||
log.Printf("| %s", strings.TrimSpace(line))
|
||||
}
|
||||
@@ -951,6 +1187,10 @@ func (b *bisyncTest) storeGolden() {
|
||||
if fileType(fileName) == "lock" {
|
||||
continue
|
||||
}
|
||||
if fileName == "backupdirs" {
|
||||
log.Printf("skipping: %v", fileName)
|
||||
continue
|
||||
}
|
||||
goldName := b.toGolden(fileName)
|
||||
if goldName != fileName {
|
||||
targetPath := filepath.Join(b.workDir, goldName)
|
||||
@@ -972,6 +1212,10 @@ func (b *bisyncTest) storeGolden() {
|
||||
if fileType(fileName) == "lock" {
|
||||
continue
|
||||
}
|
||||
if fileName == "backupdirs" {
|
||||
log.Printf("skipping: %v", fileName)
|
||||
continue
|
||||
}
|
||||
text := b.mangleResult(b.goldenDir, fileName, true)
|
||||
|
||||
goldName := b.toGolden(fileName)
|
||||
@@ -988,17 +1232,27 @@ func (b *bisyncTest) storeGolden() {
|
||||
|
||||
// mangleResult prepares test logs or listings for comparison
|
||||
func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
|
||||
if file == "backupdirs" {
|
||||
return "skipping backupdirs"
|
||||
}
|
||||
buf, err := os.ReadFile(filepath.Join(dir, file))
|
||||
require.NoError(b.t, err)
|
||||
|
||||
// normalize unicode so tests are runnable on macOS
|
||||
buf = norm.NFC.Bytes(buf)
|
||||
|
||||
text := string(buf)
|
||||
|
||||
switch fileType(strings.TrimSuffix(file, ".sav")) {
|
||||
case "queue":
|
||||
lines := strings.Split(text, eol)
|
||||
sort.Strings(lines)
|
||||
for i, line := range lines {
|
||||
lines[i] = normalizeEncoding(line)
|
||||
}
|
||||
return joinLines(lines)
|
||||
case "listing":
|
||||
return mangleListing(text, golden)
|
||||
return b.mangleListing(text, golden, file)
|
||||
case "log":
|
||||
// fall thru
|
||||
default:
|
||||
@@ -1006,7 +1260,16 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
|
||||
}
|
||||
|
||||
// Adapt log lines to the golden way.
|
||||
lines := strings.Split(string(buf), eol)
|
||||
// First replace filenames with whitespace
|
||||
// some backends (such as crypt) log them on multiple lines due to encoding differences, while others (local) do not
|
||||
wsrep := []string{
|
||||
"subdir with" + eol + "white space.txt/file2 with" + eol + "white space.txt",
|
||||
"subdir with white space.txt/file2 with white space.txt",
|
||||
}
|
||||
whitespaceJoiner := strings.NewReplacer(wsrep...)
|
||||
s := whitespaceJoiner.Replace(string(buf))
|
||||
|
||||
lines := strings.Split(s, eol)
|
||||
pathReplacer := b.newReplacer(true)
|
||||
|
||||
rep := logReplacements
|
||||
@@ -1090,7 +1353,7 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
|
||||
}
|
||||
|
||||
// mangleListing sorts listing lines before comparing.
|
||||
func mangleListing(text string, golden bool) string {
|
||||
func (b *bisyncTest) mangleListing(text string, golden bool, file string) string {
|
||||
lines := strings.Split(text, eol)
|
||||
|
||||
hasHeader := len(lines) > 0 && strings.HasPrefix(lines[0], bisync.ListingHeader)
|
||||
@@ -1114,12 +1377,43 @@ func mangleListing(text string, golden bool) string {
|
||||
return getFile(lines[i]) < getFile(lines[j])
|
||||
})
|
||||
|
||||
// Store hash as golden but ignore when comparing.
|
||||
// parse whether this is Path1 or Path2 (so we can apply per-Fs precision/hash settings)
|
||||
isPath1 := strings.Contains(file, ".path1.lst")
|
||||
f := b.fs2
|
||||
if isPath1 {
|
||||
f = b.fs1
|
||||
}
|
||||
|
||||
// account for differences in backend features when comparing
|
||||
if !golden {
|
||||
for i, s := range lines {
|
||||
// Store hash as golden but ignore when comparing (only if no md5 support).
|
||||
match := regex.FindStringSubmatch(strings.TrimSpace(s))
|
||||
if match != nil && match[2] != "-" {
|
||||
lines[i] = match[1] + "-" + match[3] + match[4]
|
||||
if match != nil && match[2] != "-" && (!b.fs1.Hashes().Contains(hash.MD5) || !b.fs2.Hashes().Contains(hash.MD5)) { // if hash is not empty and either side lacks md5
|
||||
lines[i] = match[1] + "-" + match[3] + match[4] // replace it with "-" for comparison purposes (see #5679)
|
||||
}
|
||||
// account for modtime precision
|
||||
var lineRegex = regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d{9}[+-]\d{4}) (".+")$`)
|
||||
const timeFormat = "2006-01-02T15:04:05.000000000-0700"
|
||||
const lineFormat = "%s %8d %s %s %s %q\n"
|
||||
var TZ = time.UTC
|
||||
fields := lineRegex.FindStringSubmatch(strings.TrimSuffix(lines[i], "\n"))
|
||||
if fields != nil {
|
||||
sizeVal, sizeErr := strconv.ParseInt(fields[2], 10, 64)
|
||||
if sizeErr == nil {
|
||||
// account for filename encoding differences by normalizing to OS encoding
|
||||
fields[6] = normalizeEncoding(fields[6])
|
||||
timeStr := fields[5]
|
||||
if f.Precision() == fs.ModTimeNotSupported {
|
||||
lines[i] = fmt.Sprintf(lineFormat, fields[1], sizeVal, fields[3], fields[4], "-", fields[6])
|
||||
continue
|
||||
}
|
||||
timeVal, timeErr := time.ParseInLocation(timeFormat, timeStr, TZ)
|
||||
if timeErr == nil {
|
||||
timeRound := timeVal.Round(f.Precision() * 2)
|
||||
lines[i] = fmt.Sprintf(lineFormat, fields[1], sizeVal, fields[3], fields[4], timeRound, fields[6])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1163,12 +1457,15 @@ func (b *bisyncTest) newReplacer(mangle bool) *strings.Replacer {
|
||||
b.dataDir + slash, "{datadir/}",
|
||||
b.testDir + slash, "{testdir/}",
|
||||
b.workDir + slash, "{workdir/}",
|
||||
b.fs1.String(), "{path1String}",
|
||||
b.fs2.String(), "{path2String}",
|
||||
b.path1, "{path1/}",
|
||||
b.path2, "{path2/}",
|
||||
"//?/" + strings.TrimSuffix(strings.Replace(b.path1, slash, "/", -1), "/"), "{path1}", // fix windows-specific issue
|
||||
"//?/" + strings.TrimSuffix(strings.Replace(b.path2, slash, "/", -1), "/"), "{path2}",
|
||||
strings.TrimSuffix(b.path1, slash), "{path1}", // ensure it's still recognized without trailing slash
|
||||
strings.TrimSuffix(b.path2, slash), "{path2}",
|
||||
b.workDir, "{workdir}",
|
||||
b.sessionName, "{session}",
|
||||
}
|
||||
if fixSlash {
|
||||
@@ -1193,6 +1490,10 @@ func (b *bisyncTest) toGolden(name string) string {
|
||||
name = strings.ReplaceAll(name, b.canonPath1, goldenCanonBase)
|
||||
name = strings.ReplaceAll(name, b.canonPath2, goldenCanonBase)
|
||||
name = strings.TrimSuffix(name, ".sav")
|
||||
|
||||
// normalize unicode so tests are runnable on macOS
|
||||
name = norm.NFC.String(name)
|
||||
|
||||
return name
|
||||
}
|
||||
|
||||
@@ -1213,8 +1514,22 @@ func (b *bisyncTest) ensureDir(parent, dir string, optional bool) string {
|
||||
func (b *bisyncTest) listDir(dir string) (names []string) {
|
||||
files, err := os.ReadDir(dir)
|
||||
require.NoError(b.t, err)
|
||||
ignoreIt := func(file string) bool {
|
||||
ignoreList := []string{
|
||||
// ".lst-control", ".lst-dry-control", ".lst-old", ".lst-dry-old",
|
||||
".DS_Store"}
|
||||
for _, s := range ignoreList {
|
||||
if strings.Contains(file, s) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
for _, file := range files {
|
||||
names = append(names, filepath.Base(file.Name()))
|
||||
if ignoreIt(file.Name()) {
|
||||
continue
|
||||
}
|
||||
names = append(names, filepath.Base(norm.NFC.String(file.Name())))
|
||||
}
|
||||
// Sort files to ensure comparability.
|
||||
sort.Strings(names)
|
||||
@@ -1230,7 +1545,7 @@ func fileType(fileName string) string {
|
||||
return "log"
|
||||
}
|
||||
switch filepath.Ext(fileName) {
|
||||
case ".lst", ".lst-new", ".lst-err", ".lst-dry", ".lst-dry-new":
|
||||
case ".lst", ".lst-new", ".lst-err", ".lst-dry", ".lst-dry-new", ".lst-old", ".lst-dry-old", ".lst-control", ".lst-dry-control":
|
||||
return "listing"
|
||||
case ".que":
|
||||
return "queue"
|
||||
@@ -1254,3 +1569,36 @@ func (b *bisyncTest) logPrintf(text string, args ...interface{}) {
|
||||
require.NoError(b.t, err, "writing log file")
|
||||
}
|
||||
}
|
||||
|
||||
// account for filename encoding differences between remotes by normalizing to OS encoding
|
||||
func normalizeEncoding(s string) string {
|
||||
if s == "" || s == "." {
|
||||
return s
|
||||
}
|
||||
nameVal, err := strconv.Unquote(s)
|
||||
if err != nil {
|
||||
nameVal = s
|
||||
}
|
||||
nameVal = filepath.Clean(nameVal)
|
||||
nameVal = encoder.OS.FromStandardPath(nameVal)
|
||||
return strconv.Quote(encoder.OS.ToStandardPath(filepath.ToSlash(nameVal)))
|
||||
}
|
||||
|
||||
func stringToHash(s string) string {
|
||||
ht := hash.MD5
|
||||
hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
||||
if err != nil {
|
||||
fs.Errorf(s, "hash unsupported: %v", err)
|
||||
}
|
||||
|
||||
_, err = hasher.Write([]byte(s))
|
||||
if err != nil {
|
||||
fs.Errorf(s, "failed to write to hasher: %v", err)
|
||||
}
|
||||
|
||||
sum, err := hasher.SumString(ht, false)
|
||||
if err != nil {
|
||||
fs.Errorf(s, "hasher returned an error: %v", err)
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
271
cmd/bisync/checkfn.go
Normal file
@@ -0,0 +1,271 @@
|
||||
package bisync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/backend/crypt"
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/cmd/check"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
)
|
||||
|
||||
var hashType hash.Type
|
||||
var fsrc, fdst fs.Fs
|
||||
var fcrypt *crypt.Fs
|
||||
|
||||
// WhichCheck determines which CheckFn we should use based on the Fs types
|
||||
// It is more robust and accurate than Check because
|
||||
// it will fall back to CryptCheck or DownloadCheck instead of --size-only!
|
||||
// it returns the *operations.CheckOpt with the CheckFn set.
|
||||
func WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt {
|
||||
ci := fs.GetConfig(ctx)
|
||||
common := opt.Fsrc.Hashes().Overlap(opt.Fdst.Hashes())
|
||||
|
||||
// note that ci.IgnoreChecksum doesn't change the behavior of Check -- it's just a way to opt-out of cryptcheck/download
|
||||
if common.Count() > 0 || ci.SizeOnly || ci.IgnoreChecksum {
|
||||
// use normal check
|
||||
opt.Check = CheckFn
|
||||
return opt
|
||||
}
|
||||
|
||||
FsrcCrypt, srcIsCrypt := opt.Fsrc.(*crypt.Fs)
|
||||
FdstCrypt, dstIsCrypt := opt.Fdst.(*crypt.Fs)
|
||||
|
||||
if (srcIsCrypt && dstIsCrypt) || (!srcIsCrypt && dstIsCrypt) {
|
||||
// if both are crypt or only dst is crypt
|
||||
hashType = FdstCrypt.UnWrap().Hashes().GetOne()
|
||||
if hashType != hash.None {
|
||||
// use cryptcheck
|
||||
fsrc = opt.Fsrc
|
||||
fdst = opt.Fdst
|
||||
fcrypt = FdstCrypt
|
||||
fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
|
||||
opt.Check = CryptCheckFn
|
||||
return opt
|
||||
}
|
||||
} else if srcIsCrypt && !dstIsCrypt {
|
||||
// if only src is crypt
|
||||
hashType = FsrcCrypt.UnWrap().Hashes().GetOne()
|
||||
if hashType != hash.None {
|
||||
// use reverse cryptcheck
|
||||
fsrc = opt.Fdst
|
||||
fdst = opt.Fsrc
|
||||
fcrypt = FsrcCrypt
|
||||
fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
|
||||
opt.Check = ReverseCryptCheckFn
|
||||
return opt
|
||||
}
|
||||
}
|
||||
|
||||
// if we've gotten this far, neither check nor cryptcheck will work, so use --download
|
||||
fs.Infof(fdst, "Can't compare hashes, so using check --download for safety. (Use --size-only or --ignore-checksum to disable)")
|
||||
opt.Check = DownloadCheckFn
|
||||
return opt
|
||||
}
|
||||
|
||||
// CheckFn is a slightly modified version of Check
|
||||
func CheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
|
||||
same, ht, err := operations.CheckHashes(ctx, src, dst)
|
||||
if err != nil {
|
||||
return true, false, err
|
||||
}
|
||||
if ht == hash.None {
|
||||
return false, true, nil
|
||||
}
|
||||
if !same {
|
||||
err = fmt.Errorf("%v differ", ht)
|
||||
fs.Errorf(src, "%v", err)
|
||||
return true, false, nil
|
||||
}
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
// CryptCheckFn is a slightly modified version of CryptCheck
|
||||
func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
|
||||
cryptDst := dst.(*crypt.Object)
|
||||
underlyingDst := cryptDst.UnWrap()
|
||||
underlyingHash, err := underlyingDst.Hash(ctx, hashType)
|
||||
if err != nil {
|
||||
return true, false, fmt.Errorf("error reading hash from underlying %v: %w", underlyingDst, err)
|
||||
}
|
||||
if underlyingHash == "" {
|
||||
return false, true, nil
|
||||
}
|
||||
cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
|
||||
if err != nil {
|
||||
return true, false, fmt.Errorf("error computing hash: %w", err)
|
||||
}
|
||||
if cryptHash == "" {
|
||||
return false, true, nil
|
||||
}
|
||||
if cryptHash != underlyingHash {
|
||||
err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
|
||||
fs.Debugf(src, err.Error())
|
||||
// using same error msg as CheckFn so integration tests match
|
||||
err = fmt.Errorf("%v differ", hashType)
|
||||
fs.Errorf(src, err.Error())
|
||||
return true, false, nil
|
||||
}
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
// ReverseCryptCheckFn is like CryptCheckFn except src and dst are switched
|
||||
// result: src is crypt, dst is non-crypt
|
||||
func ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
|
||||
return CryptCheckFn(ctx, src, dst)
|
||||
}
|
||||
|
||||
// DownloadCheckFn is a slightly modified version of Check with --download
|
||||
func DownloadCheckFn(ctx context.Context, a, b fs.Object) (differ bool, noHash bool, err error) {
|
||||
differ, err = operations.CheckIdenticalDownload(ctx, a, b)
|
||||
if err != nil {
|
||||
return true, true, fmt.Errorf("failed to download: %w", err)
|
||||
}
|
||||
return differ, false, nil
|
||||
}
|
||||
|
||||
// check potential conflicts (to avoid renaming if already identical)
|
||||
func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter.Filter, fs1, fs2 fs.Fs) (bilib.Names, error) {
|
||||
matches := bilib.Names{}
|
||||
if filterCheck.HaveFilesFrom() {
|
||||
fs.Debugf(nil, "There are potential conflicts to check.")
|
||||
|
||||
opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2)
|
||||
if checkopterr != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
|
||||
return matches, checkopterr
|
||||
}
|
||||
defer close()
|
||||
|
||||
opt.Match = new(bytes.Buffer)
|
||||
|
||||
opt = WhichCheck(ctxCheck, opt)
|
||||
|
||||
fs.Infof(nil, "Checking potential conflicts...")
|
||||
check := operations.CheckFn(ctxCheck, opt)
|
||||
fs.Infof(nil, "Finished checking the potential conflicts. %s", check)
|
||||
|
||||
//reset error count, because we don't want to count check errors as bisync errors
|
||||
accounting.Stats(ctxCheck).ResetErrors()
|
||||
|
||||
//return the list of identical files to check against later
|
||||
if len(fmt.Sprint(opt.Match)) > 0 {
|
||||
matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
|
||||
}
|
||||
if matches.NotEmpty() {
|
||||
fs.Debugf(nil, "The following potential conflicts were determined to be identical. %v", matches)
|
||||
} else {
|
||||
fs.Debugf(nil, "None of the conflicts were determined to be identical.")
|
||||
}
|
||||
|
||||
}
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// WhichEqual is similar to WhichCheck, but checks a single object.
|
||||
// Returns true if the objects are equal, false if they differ or if we don't know
|
||||
func WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool {
|
||||
opt, close, checkopterr := check.GetCheckOpt(Fsrc, Fdst)
|
||||
if checkopterr != nil {
|
||||
fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
|
||||
}
|
||||
defer close()
|
||||
|
||||
opt = WhichCheck(ctx, opt)
|
||||
differ, noHash, err := opt.Check(ctx, dst, src)
|
||||
if err != nil {
|
||||
fs.Errorf(src, "failed to check: %v", err)
|
||||
return false
|
||||
}
|
||||
if noHash {
|
||||
fs.Errorf(src, "failed to check as hash is missing")
|
||||
return false
|
||||
}
|
||||
return !differ
|
||||
}
|
||||
|
||||
// Replaces the standard Equal func with one that also considers checksum
|
||||
// Note that it also updates the modtime the same way as Sync
|
||||
func (b *bisyncRun) EqualFn(ctx context.Context) context.Context {
|
||||
ci := fs.GetConfig(ctx)
|
||||
ci.CheckSum = false // force checksum off so modtime is evaluated if needed
|
||||
// modtime and size settings should already be set correctly for Equal
|
||||
var equalFn operations.EqualFn = func(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool {
|
||||
fs.Debugf(src, "evaluating...")
|
||||
equal := false
|
||||
logger, _ := operations.GetLogger(ctx)
|
||||
// temporarily unset logger, we don't want Equal to duplicate it
|
||||
noop := func(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) {
|
||||
fs.Debugf(src, "equal skipped")
|
||||
}
|
||||
ctxNoLogger := operations.WithLogger(ctx, noop)
|
||||
|
||||
timeSizeEqualFn := func() (equal bool, skipHash bool) { return operations.Equal(ctxNoLogger, src, dst), false } // normally use Equal()
|
||||
if b.opt.ResyncMode == PreferOlder || b.opt.ResyncMode == PreferLarger || b.opt.ResyncMode == PreferSmaller {
|
||||
timeSizeEqualFn = func() (equal bool, skipHash bool) { return b.resyncTimeSizeEqual(ctxNoLogger, src, dst) } // but override for --resync-mode older, larger, smaller
|
||||
}
|
||||
skipHash := false // (note that we might skip it anyway based on compare/ht settings)
|
||||
equal, skipHash = timeSizeEqualFn()
|
||||
if equal && !skipHash {
|
||||
whichHashType := func(f fs.Info) hash.Type {
|
||||
ht := getHashType(f.Name())
|
||||
if ht == hash.None && b.opt.Compare.SlowHashSyncOnly && !b.opt.Resync {
|
||||
ht = f.Hashes().GetOne()
|
||||
}
|
||||
return ht
|
||||
}
|
||||
srcHash, _ := src.Hash(ctx, whichHashType(src.Fs()))
|
||||
dstHash, _ := dst.Hash(ctx, whichHashType(dst.Fs()))
|
||||
srcHash, _ = tryDownloadHash(ctx, src, srcHash)
|
||||
dstHash, _ = tryDownloadHash(ctx, dst, dstHash)
|
||||
equal = !hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size())
|
||||
}
|
||||
if equal {
|
||||
logger(ctx, operations.Match, src, dst, nil)
|
||||
fs.Debugf(src, "EqualFn: files are equal")
|
||||
return true
|
||||
}
|
||||
logger(ctx, operations.Differ, src, dst, nil)
|
||||
fs.Debugf(src, "EqualFn: files are NOT equal")
|
||||
return false
|
||||
}
|
||||
return operations.WithEqualFn(ctx, equalFn)
|
||||
}
|
||||
|
||||
func (b *bisyncRun) resyncTimeSizeEqual(ctxNoLogger context.Context, src fs.ObjectInfo, dst fs.Object) (equal bool, skipHash bool) {
|
||||
switch b.opt.ResyncMode {
|
||||
case PreferLarger, PreferSmaller:
|
||||
// note that arg order is path1, path2, regardless of src/dst
|
||||
path1, path2 := b.resyncWhichIsWhich(src, dst)
|
||||
if sizeDiffers(path1.Size(), path2.Size()) {
|
||||
winningPath := b.resolveLargerSmaller(path1.Size(), path2.Size(), path1.Remote(), path2.Remote(), b.opt.ResyncMode)
|
||||
// don't need to check/update modtime here, as sizes definitely differ and something will be transferred
|
||||
return b.resyncWinningPathToEqual(winningPath), b.resyncWinningPathToEqual(winningPath) // skip hash check if true
|
||||
}
|
||||
// sizes equal or don't know, so continue to checking time/hash, if applicable
|
||||
return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2
|
||||
case PreferOlder:
|
||||
// note that arg order is path1, path2, regardless of src/dst
|
||||
path1, path2 := b.resyncWhichIsWhich(src, dst)
|
||||
if timeDiffers(ctxNoLogger, path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Fs(), path2.Fs()) {
|
||||
winningPath := b.resolveNewerOlder(path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Remote(), path2.Remote(), b.opt.ResyncMode)
|
||||
// if src is winner, proceed with equal to check size/hash and possibly just update dest modtime instead of transferring
|
||||
if !b.resyncWinningPathToEqual(winningPath) {
|
||||
return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2
|
||||
}
|
||||
// if dst is winner (and definitely unequal), do not proceed further as we want dst to overwrite src regardless of size difference, and we do not want dest modtime updated
|
||||
return true, true
|
||||
}
|
||||
// times equal or don't know, so continue to checking size/hash, if applicable
|
||||
}
|
||||
return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2
|
||||
}
|
||||
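The following is an illustrative sketch (not part of the change) of how WhichCheck is intended to be combined with check.GetCheckOpt, mirroring the checkconflicts code above; the helper name and arguments are invented, and it is written as if it lived in this package, reusing the imports above.

// exampleWhichCheck shows the intended flow: build a CheckOpt, let WhichCheck
// pick the safest CheckFn (check, cryptcheck or download), then compare one
// pair of objects. Illustrative only -- not shipped with bisync.
func exampleWhichCheck(ctx context.Context, f1, f2 fs.Fs, src, dst fs.Object) (identical bool, err error) {
	opt, close, err := check.GetCheckOpt(f1, f2)
	if err != nil {
		return false, err
	}
	defer close()

	opt = WhichCheck(ctx, opt) // sets CheckFn, CryptCheckFn, ReverseCryptCheckFn or DownloadCheckFn
	differ, noHash, err := opt.Check(ctx, dst, src)
	if err != nil {
		return false, err
	}
	return !differ && !noHash, nil
}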
@@ -25,9 +25,13 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// TestFunc allows mocking errors during tests
|
||||
type TestFunc func()
|
||||
|
||||
// Options keep bisync options
|
||||
type Options struct {
|
||||
Resync bool
|
||||
Resync bool // whether or not this is a resync
|
||||
ResyncMode Prefer // which mode to use for resync
|
||||
CheckAccess bool
|
||||
CheckFilename string
|
||||
CheckSync CheckSyncMode
|
||||
@@ -37,11 +41,26 @@ type Options struct {
|
||||
Force bool
|
||||
FiltersFile string
|
||||
Workdir string
|
||||
OrigBackupDir string
|
||||
BackupDir1 string
|
||||
BackupDir2 string
|
||||
DryRun bool
|
||||
NoCleanup bool
|
||||
SaveQueues bool // save extra debugging files (test only flag)
|
||||
IgnoreListingChecksum bool
|
||||
Resilient bool
|
||||
Recover bool
|
||||
TestFn TestFunc // test-only option, for mocking errors
|
||||
Retries int
|
||||
Compare CompareOpt
|
||||
CompareFlag string
|
||||
DebugName string
|
||||
MaxLock time.Duration
|
||||
ConflictResolve Prefer
|
||||
ConflictLoser ConflictLoserAction
|
||||
ConflictSuffixFlag string
|
||||
ConflictSuffix1 string
|
||||
ConflictSuffix2 string
|
||||
}
|
||||
|
||||
// Default values
|
||||
@@ -99,9 +118,14 @@ func (x *CheckSyncMode) Type() string {
|
||||
var Opt Options
|
||||
|
||||
func init() {
|
||||
Opt.Retries = 3
|
||||
Opt.MaxLock = 0
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.", "")
|
||||
// when adding new flags, remember to also update the rc params:
|
||||
// cmd/bisync/rc.go cmd/bisync/help.go (not docs/content/rc.md)
|
||||
flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Equivalent to --resync-mode path1. Consider using --verbose or --dry-run first.", "")
|
||||
flags.FVarP(cmdFlags, &Opt.ResyncMode, "resync-mode", "", "During resync, prefer the version that is: path1, path2, newer, older, larger, smaller (default: path1 if --resync, otherwise none for no resync.)", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."), "")
|
||||
flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"), "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. Consider using with --verbose", "")
|
||||
@@ -110,10 +134,25 @@ func init() {
|
||||
flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove ALL empty directories at the final cleanup step.", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"), "")
|
||||
flags.StringVarP(cmdFlags, &Opt.BackupDir1, "backup-dir1", "", Opt.BackupDir1, "--backup-dir for Path1. Must be a non-overlapping path on the same remote.", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.BackupDir2, "backup-dir2", "", Opt.BackupDir2, "--backup-dir for Path2. Must be a non-overlapping path on the same remote.", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.DebugName, "debugname", "", Opt.DebugName, "Debug by tracking one file at various points throughout a bisync run (when -v or -vv)", "")
|
||||
flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Recover, "recover", "", Opt.Recover, "Automatically recover from interruptions without requiring --resync.", "")
|
||||
flags.IntVarP(cmdFlags, &Opt.Retries, "retries", "", Opt.Retries, "Retry operations this many times if they fail", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.CompareFlag, "compare", "", Opt.CompareFlag, "Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Compare.NoSlowHash, "no-slow-hash", "", Opt.Compare.NoSlowHash, "Ignore listing checksums only on backends where they are slow", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Compare.SlowHashSyncOnly, "slow-hash-sync-only", "", Opt.Compare.SlowHashSyncOnly, "Ignore slow checksums for listings and deltas, but still consider them during sync calls.", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Compare.DownloadHash, "download-hash", "", Opt.Compare.DownloadHash, "Compute hash by downloading when otherwise unavailable. (warning: may be slow and use lots of data!)", "")
|
||||
flags.DurationVarP(cmdFlags, &Opt.MaxLock, "max-lock", "", Opt.MaxLock, "Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m)", "")
|
||||
flags.FVarP(cmdFlags, &Opt.ConflictResolve, "conflict-resolve", "", "Automatically resolve conflicts by preferring the version that is: "+ConflictResolveList+" (default: none)", "")
|
||||
flags.FVarP(cmdFlags, &Opt.ConflictLoser, "conflict-loser", "", "Action to take on the loser of a sync conflict (when there is a winner) or on both files (when there is no winner): "+ConflictLoserList+" (default: num)", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.ConflictSuffixFlag, "conflict-suffix", "", Opt.ConflictSuffixFlag, "Suffix to use when renaming a --conflict-loser. Can be either one string or two comma-separated strings to assign different suffixes to Path1/Path2. (default: 'conflict')", "")
|
||||
_ = cmdFlags.MarkHidden("debugname")
|
||||
_ = cmdFlags.MarkHidden("localtime")
|
||||
}
|
||||
|
||||
// bisync command definition
|
||||
@@ -124,8 +163,11 @@ var commandDefinition = &cobra.Command{
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.58",
|
||||
"groups": "Filter,Copy,Important",
|
||||
"status": "Beta",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
// NOTE: avoid putting too much handling here, as it won't apply to the rc.
|
||||
// Generally it's best to put init-type stuff in Bisync() (operations.go)
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fs1, file1, fs2, file2 := cmd.NewFsSrcDstFiles(args)
|
||||
if file1 != "" || file2 != "" {
|
||||
@@ -149,7 +191,7 @@ var commandDefinition = &cobra.Command{
|
||||
}
|
||||
}
|
||||
|
||||
fs.Logf(nil, "bisync is EXPERIMENTAL. Don't use in production!")
|
||||
fs.Logf(nil, "bisync is IN BETA. Don't use in production!")
|
||||
cmd.Run(false, true, command, func() error {
|
||||
err := Bisync(ctx, fs1, fs2, &opt)
|
||||
if err == ErrBisyncAborted {
|
||||
|
||||
309
cmd/bisync/compare.go
Normal file
@@ -0,0 +1,309 @@
|
||||
package bisync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
mutex "sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
)
|
||||
|
||||
// CompareOpt describes the Compare options in force
|
||||
type CompareOpt = struct {
|
||||
Modtime bool
|
||||
Size bool
|
||||
Checksum bool
|
||||
HashType1 hash.Type
|
||||
HashType2 hash.Type
|
||||
NoSlowHash bool
|
||||
SlowHashSyncOnly bool
|
||||
SlowHashDetected bool
|
||||
DownloadHash bool
|
||||
}
|
||||
|
||||
func (b *bisyncRun) setCompareDefaults(ctx context.Context) error {
|
||||
ci := fs.GetConfig(ctx)
|
||||
|
||||
// defaults
|
||||
b.opt.Compare.Size = true
|
||||
b.opt.Compare.Modtime = true
|
||||
b.opt.Compare.Checksum = false
|
||||
|
||||
if ci.SizeOnly {
|
||||
b.opt.Compare.Size = true
|
||||
b.opt.Compare.Modtime = false
|
||||
b.opt.Compare.Checksum = false
|
||||
} else if ci.CheckSum && !b.opt.IgnoreListingChecksum {
|
||||
b.opt.Compare.Size = true
|
||||
b.opt.Compare.Modtime = false
|
||||
b.opt.Compare.Checksum = true
|
||||
}
|
||||
|
||||
if ci.IgnoreSize {
|
||||
b.opt.Compare.Size = false
|
||||
}
|
||||
|
||||
err = b.setFromCompareFlag(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.fs1.Features().SlowHash || b.fs2.Features().SlowHash {
|
||||
b.opt.Compare.SlowHashDetected = true
|
||||
}
|
||||
if b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
|
||||
b.setHashType(ci)
|
||||
}
|
||||
|
||||
// Checks and Warnings
|
||||
if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected && b.opt.Resync {
|
||||
fs.Logf(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set."))
|
||||
ci.CheckSum = false
|
||||
// note not setting b.opt.Compare.Checksum = false as we still want to build listings on the non-slow side, if any
|
||||
} else if b.opt.Compare.Checksum && !ci.CheckSum {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set."))
|
||||
}
|
||||
if b.opt.Compare.Modtime && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead."))
|
||||
}
|
||||
if (ci.CheckSum || b.opt.Compare.Checksum) && b.opt.IgnoreListingChecksum {
|
||||
if (b.opt.Compare.HashType1 == hash.None || b.opt.Compare.HashType2 == hash.None) && !b.opt.Compare.DownloadHash {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, `WARNING: Checksum compare was requested but at least one remote does not support checksums (or checksums are being ignored) and --ignore-listing-checksum is set.
|
||||
Ignoring Checksums globally and falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime). Path1 (%s): %s, Path2 (%s): %s`),
|
||||
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String())
|
||||
b.opt.Compare.Modtime = true
|
||||
b.opt.Compare.Size = true
|
||||
ci.CheckSum = false
|
||||
b.opt.Compare.Checksum = false
|
||||
} else {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set"))
|
||||
// note: --checksum will still affect the internal sync calls
|
||||
}
|
||||
}
|
||||
if !ci.CheckSum && !b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
|
||||
fs.Infof(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set."))
|
||||
b.opt.IgnoreListingChecksum = true
|
||||
}
|
||||
if !b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
|
||||
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)"))
|
||||
}
|
||||
|
||||
notSupported := func(label string, value bool, opt *bool) {
|
||||
if value {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label)
|
||||
*opt = false
|
||||
}
|
||||
}
|
||||
notSupported("--update", ci.UpdateOlder, &ci.UpdateOlder)
|
||||
notSupported("--no-check-dest", ci.NoCheckDest, &ci.NoCheckDest)
|
||||
notSupported("--no-traverse", ci.NoTraverse, &ci.NoTraverse)
|
||||
// TODO: thorough search for other flags that should be on this list...
|
||||
|
||||
prettyprint(b.opt.Compare, "Bisyncing with Comparison Settings", fs.LogLevelInfo)
|
||||
return nil
|
||||
}
|
||||
|
||||
// returns true if the sizes are definitely different.
|
||||
// returns false if equal, or if either is unknown.
|
||||
func sizeDiffers(a, b int64) bool {
|
||||
if a < 0 || b < 0 {
|
||||
return false
|
||||
}
|
||||
return a != b
|
||||
}
|
||||
|
||||
// returns true if the hashes are definitely different.
|
||||
// returns false if equal, or if either is unknown.
|
||||
func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool {
|
||||
if a == "" || b == "" {
|
||||
if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b)
|
||||
}
|
||||
return false
|
||||
}
|
||||
if ht1 != ht2 {
|
||||
if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
|
||||
return false
|
||||
}
|
||||
}
|
||||
return a != b
|
||||
}
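// A minimal sketch of the "definitely different" convention used by sizeDiffers and
// hashDiffers above: unknown values (a negative size, a blank hash) never count as a
// difference, so only positive evidence marks a delta. Hypothetical helper and values.
func exampleDiffers() {
	fs.Debugf(nil, "%v", sizeDiffers(-1, 1234))  // false: -1 means size unknown, never a definite diff
	fs.Debugf(nil, "%v", sizeDiffers(1234, 999)) // true: both sizes known and unequal
	// a blank hash counts as unknown, so no definite diff (a warning is logged if both sides support hashes)
	fs.Debugf(nil, "%v", hashDiffers("", "0cc175b9c0f1b6a831c399e269772661", hash.MD5, hash.MD5, 5, 5)) // false
	// mismatched hash types are not compared (outside the --download-hash MD5 special case)
	fs.Debugf(nil, "%v", hashDiffers("0cc175b9c0f1b6a831c399e269772661", "86f7e437faa5a7fce15d1ddcb9eaeaea377667b8", hash.MD5, hash.SHA1, 5, 5)) // false
}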
|
||||
// chooses hash type, giving priority to types both sides have in common
|
||||
func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
|
||||
downloadHash = b.opt.Compare.DownloadHash
|
||||
if b.opt.Compare.NoSlowHash && b.opt.Compare.SlowHashDetected {
|
||||
fs.Infof(nil, "Not checking for common hash as at least one slow hash detected.")
|
||||
} else {
|
||||
common := b.fs1.Hashes().Overlap(b.fs2.Hashes())
|
||||
if common.Count() > 0 && common.GetOne() != hash.None {
|
||||
ht := common.GetOne()
|
||||
b.opt.Compare.HashType1 = ht
|
||||
b.opt.Compare.HashType2 = ht
|
||||
if !b.opt.Compare.SlowHashSyncOnly || !b.opt.Compare.SlowHashDetected {
|
||||
return
|
||||
}
|
||||
} else if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected {
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common."))
|
||||
b.opt.Compare.SlowHashSyncOnly = false
|
||||
b.opt.Compare.NoSlowHash = true
|
||||
ci.CheckSum = false
|
||||
}
|
||||
}
|
||||
|
||||
if !b.opt.Compare.DownloadHash && !b.opt.Compare.SlowHashSyncOnly {
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)"))
|
||||
fs.Infof("Path1 hashes", "%v", b.fs1.Hashes().String())
|
||||
fs.Infof("Path2 hashes", "%v", b.fs2.Hashes().String())
|
||||
b.opt.Compare.Modtime = true
|
||||
b.opt.Compare.Size = true
|
||||
ci.CheckSum = false
|
||||
}
|
||||
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs1.Features().SlowHash {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings"))
|
||||
b.opt.Compare.HashType1 = hash.None
|
||||
} else {
|
||||
b.opt.Compare.HashType1 = b.fs1.Hashes().GetOne()
|
||||
if b.opt.Compare.HashType1 != hash.None {
|
||||
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1)
|
||||
}
|
||||
}
|
||||
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
|
||||
b.opt.Compare.HashType2 = hash.None
|
||||
} else {
|
||||
b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
|
||||
if b.opt.Compare.HashType2 != hash.None {
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2)
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.HashType1 == hash.None && b.opt.Compare.HashType2 == hash.None && !b.opt.Compare.DownloadHash {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides."))
|
||||
b.opt.Compare.Checksum = false
|
||||
ci.CheckSum = false
|
||||
b.opt.IgnoreListingChecksum = true
|
||||
}
|
||||
}
|
||||
|
||||
// returns true if the times are definitely different (by more than the modify window).
|
||||
// returns false if equal, within modify window, or if either is unknown.
|
||||
// considers precision per-Fs.
|
||||
func timeDiffers(ctx context.Context, a, b time.Time, fsA, fsB fs.Info) bool {
|
||||
modifyWindow := fs.GetModifyWindow(ctx, fsA, fsB)
|
||||
if modifyWindow == fs.ModTimeNotSupported {
|
||||
return false
|
||||
}
|
||||
if a.IsZero() || b.IsZero() {
|
||||
fs.Logf(fsA, "Fs supports modtime, but modtime is missing")
|
||||
return false
|
||||
}
|
||||
dt := b.Sub(a)
|
||||
if dt < modifyWindow && dt > -modifyWindow {
|
||||
fs.Debugf(a, "modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow)
|
||||
return false
|
||||
}
|
||||
|
||||
fs.Debugf(a, "Modification times differ by %s: %v, %v", dt, a, b)
|
||||
return true
|
||||
}
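// Worked example of the modify-window tolerance above (assumed values): with a 1s modify
// window (e.g. when one side only stores whole seconds), modtimes of 20:22:45.2 and
// 20:22:45.9 differ by 700ms, inside the window, so timeDiffers returns false; a gap of
// 2s on the same remotes would return true.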
|
||||
func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
|
||||
if b.opt.CompareFlag == "" {
|
||||
return nil
|
||||
}
|
||||
var CompareFlag CompareOpt // for exclusions
|
||||
opts := strings.Split(b.opt.CompareFlag, ",")
|
||||
for _, opt := range opts {
|
||||
switch strings.ToLower(strings.TrimSpace(opt)) {
|
||||
case "size":
|
||||
b.opt.Compare.Size = true
|
||||
CompareFlag.Size = true
|
||||
case "modtime":
|
||||
b.opt.Compare.Modtime = true
|
||||
CompareFlag.Modtime = true
|
||||
case "checksum":
|
||||
b.opt.Compare.Checksum = true
|
||||
CompareFlag.Checksum = true
|
||||
default:
|
||||
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt)
|
||||
}
|
||||
}
|
||||
|
||||
// exclusions (override defaults, only if --compare != "")
|
||||
if !CompareFlag.Size {
|
||||
b.opt.Compare.Size = false
|
||||
}
|
||||
if !CompareFlag.Modtime {
|
||||
b.opt.Compare.Modtime = false
|
||||
}
|
||||
if !CompareFlag.Checksum {
|
||||
b.opt.Compare.Checksum = false
|
||||
}
|
||||
|
||||
// override sync flags to match
|
||||
ci := fs.GetConfig(ctx)
|
||||
if b.opt.Compare.Checksum {
|
||||
ci.CheckSum = true
|
||||
}
|
||||
if b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
|
||||
ci.CheckSum = false
|
||||
}
|
||||
if !b.opt.Compare.Size {
|
||||
ci.IgnoreSize = true
|
||||
}
|
||||
if !b.opt.Compare.Modtime {
|
||||
ci.UseServerModTime = true
|
||||
}
|
||||
if b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
|
||||
ci.SizeOnly = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
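// Worked example of the exclusion logic above for --compare checksum (assumed values, not
// from a real run):
//   after defaults:    Compare = {Size:true,  Modtime:true,  Checksum:false}
//   after the loop:    Compare = {Size:true,  Modtime:true,  Checksum:true}   (only the named options are switched on)
//   after exclusions:  Compare = {Size:false, Modtime:false, Checksum:true}   (defaults not named in --compare are switched off)
//   sync overrides:    ci.CheckSum = true, ci.IgnoreSize = true, ci.UseServerModTime = true
//                      (ci.SizeOnly is untouched; it is only set when size is the sole method)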
|
||||
// downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable
|
||||
var downloadHash bool
|
||||
var downloadHashWarn mutex.Once
|
||||
var firstDownloadHash mutex.Once
|
||||
|
||||
func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) {
|
||||
if hashVal != "" || !downloadHash {
|
||||
return hashVal, nil
|
||||
}
|
||||
obj, ok := o.(fs.Object)
|
||||
if !ok {
|
||||
fs.Infof(o, "failed to download hash -- not an fs.Object")
|
||||
return hashVal, fs.ErrorObjectNotFound
|
||||
}
|
||||
if o.Size() < 0 {
|
||||
downloadHashWarn.Do(func() {
|
||||
fs.Logf(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
|
||||
})
|
||||
fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
|
||||
return hashVal, hash.ErrUnsupported
|
||||
}
|
||||
|
||||
firstDownloadHash.Do(func() {
|
||||
fs.Infof(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
|
||||
})
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")
|
||||
defer func() {
|
||||
tr.Done(ctx, nil)
|
||||
}()
|
||||
|
||||
sum, err := operations.HashSum(ctx, hash.MD5, false, true, obj)
|
||||
if err != nil {
|
||||
fs.Infof(o, "DownloadHash -- hash: %v, err: %v", sum, err)
|
||||
} else {
|
||||
fs.Debugf(o, "DownloadHash -- hash: %v", sum)
|
||||
}
|
||||
return sum, err
|
||||
}
|
||||
@@ -3,19 +3,18 @@
|
||||
package bisync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/cmd/check"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// delta
|
||||
@@ -26,14 +25,17 @@ const (
|
||||
deltaNew delta = 1 << iota
|
||||
deltaNewer
|
||||
deltaOlder
|
||||
deltaSize
|
||||
deltaLarger
|
||||
deltaSmaller
|
||||
deltaHash
|
||||
deltaDeleted
|
||||
)
|
||||
|
||||
const (
|
||||
deltaModified delta = deltaNewer | deltaOlder | deltaSize | deltaHash | deltaDeleted
|
||||
deltaOther delta = deltaNew | deltaNewer | deltaOlder
|
||||
deltaSize delta = deltaLarger | deltaSmaller
|
||||
deltaTime delta = deltaNewer | deltaOlder
|
||||
deltaModified delta = deltaTime | deltaSize | deltaHash
|
||||
deltaOther delta = deltaNew | deltaTime | deltaSize | deltaHash
|
||||
)
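// Since these constants are bit flags, one file can carry several deltas at once and the
// composite masks classify it. A minimal sketch, assuming is() reports whether any bit of
// cond is set in d (hypothetical helper):
func exampleDelta() {
	var d delta
	d |= deltaNewer  // modtime moved forward
	d |= deltaLarger // size grew
	fs.Debugf(nil, "modified=%v deleted=%v", d.is(deltaModified), d.is(deltaDeleted)) // modified=true deleted=false
}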
|
||||
func (d delta) is(cond delta) bool {
|
||||
@@ -43,6 +45,9 @@ func (d delta) is(cond delta) bool {
|
||||
// deltaSet
|
||||
type deltaSet struct {
|
||||
deltas map[string]delta
|
||||
size map[string]int64
|
||||
time map[string]time.Time
|
||||
hash map[string]string
|
||||
opt *Options
|
||||
fs fs.Fs // base filesystem
|
||||
msg string // filesystem name for logging
|
||||
@@ -74,71 +79,77 @@ func (ds *deltaSet) printStats() {
|
||||
}
|
||||
nAll := len(ds.deltas)
|
||||
nNew := 0
|
||||
nMod := 0
|
||||
nTime := 0
|
||||
nNewer := 0
|
||||
nOlder := 0
|
||||
nSize := 0
|
||||
nLarger := 0
|
||||
nSmaller := 0
|
||||
nHash := 0
|
||||
nDeleted := 0
|
||||
for _, d := range ds.deltas {
|
||||
if d.is(deltaNew) {
|
||||
nNew++
|
||||
}
|
||||
if d.is(deltaModified) {
|
||||
nMod++
|
||||
}
|
||||
if d.is(deltaTime) {
|
||||
nTime++
|
||||
}
|
||||
if d.is(deltaNewer) {
|
||||
nNewer++
|
||||
}
|
||||
if d.is(deltaOlder) {
|
||||
nOlder++
|
||||
}
|
||||
if d.is(deltaSize) {
|
||||
nSize++
|
||||
}
|
||||
if d.is(deltaLarger) {
|
||||
nLarger++
|
||||
}
|
||||
if d.is(deltaSmaller) {
|
||||
nSmaller++
|
||||
}
|
||||
if d.is(deltaHash) {
|
||||
nHash++
|
||||
}
|
||||
if d.is(deltaDeleted) {
|
||||
nDeleted++
|
||||
}
|
||||
}
|
||||
fs.Infof(nil, "%s: %4d changes: %4d new, %4d newer, %4d older, %4d deleted",
|
||||
ds.msg, nAll, nNew, nNewer, nOlder, nDeleted)
|
||||
}
|
||||
|
||||
// check potential conflicts (to avoid renaming if already identical)
|
||||
func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter.Filter, fs1, fs2 fs.Fs) (bilib.Names, error) {
|
||||
matches := bilib.Names{}
|
||||
if filterCheck.HaveFilesFrom() {
|
||||
fs.Debugf(nil, "There are potential conflicts to check.")
|
||||
|
||||
opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2)
|
||||
if checkopterr != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
|
||||
return matches, checkopterr
|
||||
}
|
||||
defer close()
|
||||
|
||||
opt.Match = new(bytes.Buffer)
|
||||
|
||||
// TODO: consider using custom CheckFn to act like cryptcheck, if either fs is a crypt remote and -c has been passed
|
||||
// note that cryptCheck() is not currently exported
|
||||
|
||||
fs.Infof(nil, "Checking potential conflicts...")
|
||||
check := operations.Check(ctxCheck, opt)
|
||||
fs.Infof(nil, "Finished checking the potential conflicts. %s", check)
|
||||
|
||||
//reset error count, because we don't want to count check errors as bisync errors
|
||||
accounting.Stats(ctxCheck).ResetErrors()
|
||||
|
||||
//return the list of identical files to check against later
|
||||
if len(fmt.Sprint(opt.Match)) > 0 {
|
||||
matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
|
||||
}
|
||||
if matches.NotEmpty() {
|
||||
fs.Debugf(nil, "The following potential conflicts were determined to be identical. %v", matches)
|
||||
} else {
|
||||
fs.Debugf(nil, "None of the conflicts were determined to be identical.")
|
||||
}
|
||||
|
||||
if nAll != nNew+nMod+nDeleted {
|
||||
fs.Errorf(nil, "something doesn't add up! %4d != %4d + %4d + %4d", nAll, nNew, nMod, nDeleted)
|
||||
}
|
||||
fs.Infof(nil, "%s: %4d changes: "+Color(terminal.GreenFg, "%4d new")+", "+Color(terminal.YellowFg, "%4d modified")+", "+Color(terminal.RedFg, "%4d deleted"),
|
||||
ds.msg, nAll, nNew, nMod, nDeleted)
|
||||
if nMod > 0 {
|
||||
details := []string{}
|
||||
if nTime > 0 {
|
||||
details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d newer"), nNewer))
|
||||
details = append(details, fmt.Sprintf(Color(terminal.BlueFg, "%4d older"), nOlder))
|
||||
}
|
||||
if nSize > 0 {
|
||||
details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d larger"), nLarger))
|
||||
details = append(details, fmt.Sprintf(Color(terminal.BlueFg, "%4d smaller"), nSmaller))
|
||||
}
|
||||
if nHash > 0 {
|
||||
details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d hash differs"), nHash))
|
||||
}
|
||||
if (nNewer+nOlder != nTime) || (nLarger+nSmaller != nSize) || (nMod > nTime+nSize+nHash) {
|
||||
fs.Errorf(nil, "something doesn't add up!")
|
||||
}
|
||||
|
||||
fs.Infof(nil, "(%s: %s)", Color(terminal.YellowFg, "Modified"), strings.Join(details, ", "))
|
||||
}
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// findDeltas
|
||||
func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newListing, msg string) (ds *deltaSet, err error) {
|
||||
var old, now *fileList
|
||||
func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string, now *fileList, msg string) (ds *deltaSet, err error) {
|
||||
var old *fileList
|
||||
newListing := oldListing + "-new"
|
||||
|
||||
old, err = b.loadListing(oldListing)
|
||||
if err != nil {
|
||||
@@ -150,7 +161,6 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newLis
|
||||
return
|
||||
}
|
||||
|
||||
now, err = b.makeListing(fctx, f, newListing)
|
||||
if err == nil {
|
||||
err = b.checkListing(now, newListing, "current "+msg)
|
||||
}
|
||||
@@ -160,6 +170,9 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newLis
|
||||
|
||||
ds = &deltaSet{
|
||||
deltas: map[string]delta{},
|
||||
size: map[string]int64{},
|
||||
time: map[string]time.Time{},
|
||||
hash: map[string]string{},
|
||||
fs: f,
|
||||
msg: msg,
|
||||
oldCount: len(old.list),
|
||||
@@ -168,26 +181,75 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newLis
|
||||
}
|
||||
|
||||
for _, file := range old.list {
|
||||
// REMEMBER: this section is only concerned with comparing listings from the same side (not different sides)
|
||||
d := deltaZero
|
||||
s := int64(0)
|
||||
h := ""
|
||||
var t time.Time
|
||||
if !now.has(file) {
|
||||
b.indent(msg, file, "File was deleted")
|
||||
b.indent(msg, file, Color(terminal.RedFg, "File was deleted"))
|
||||
ds.deleted++
|
||||
d |= deltaDeleted
|
||||
} else {
|
||||
if old.getTime(file) != now.getTime(file) {
|
||||
if old.beforeOther(now, file) {
|
||||
b.indent(msg, file, "File is newer")
|
||||
d |= deltaNewer
|
||||
} else { // Current version is older than prior sync.
|
||||
b.indent(msg, file, "File is OLDER")
|
||||
d |= deltaOlder
|
||||
// skip dirs here, as we only care if they are new/deleted, not newer/older
|
||||
if !now.isDir(file) {
|
||||
whatchanged := []string{}
|
||||
if b.opt.Compare.Size {
|
||||
if sizeDiffers(old.getSize(file), now.getSize(file)) {
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getSize(file), now.getSize(file))
|
||||
if now.getSize(file) > old.getSize(file) {
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (larger)"))
|
||||
d |= deltaLarger
|
||||
} else {
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (smaller)"))
|
||||
d |= deltaSmaller
|
||||
}
|
||||
s = now.getSize(file)
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.Modtime {
|
||||
if timeDiffers(fctx, old.getTime(file), now.getTime(file), f, f) {
|
||||
if old.beforeOther(now, file) {
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (newer)"))
|
||||
d |= deltaNewer
|
||||
} else { // Current version is older than prior sync.
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (older)"))
|
||||
d |= deltaOlder
|
||||
}
|
||||
t = now.getTime(file)
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.Checksum {
|
||||
if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
|
||||
d |= deltaHash
|
||||
h = now.getHash(file)
|
||||
}
|
||||
}
|
||||
// concat changes and print log
|
||||
if d.is(deltaModified) {
|
||||
summary := fmt.Sprintf(Color(terminal.YellowFg, "File changed: %s"), strings.Join(whatchanged, ", "))
|
||||
b.indent(msg, file, summary)
|
||||
}
|
||||
}
|
||||
// TODO Compare sizes and hashes
|
||||
}
|
||||
|
||||
if d.is(deltaModified) {
|
||||
ds.deltas[file] = d
|
||||
if b.opt.Compare.Size {
|
||||
ds.size[file] = s
|
||||
}
|
||||
if b.opt.Compare.Modtime {
|
||||
ds.time[file] = t
|
||||
}
|
||||
if b.opt.Compare.Checksum {
|
||||
ds.hash[file] = h
|
||||
}
|
||||
} else if d.is(deltaDeleted) {
|
||||
ds.deltas[file] = d
|
||||
} else {
|
||||
// Once we've found at least one unchanged file,
|
||||
// we know that not everything has changed,
|
||||
@@ -198,8 +260,17 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newLis
|
||||
|
||||
for _, file := range now.list {
|
||||
if !old.has(file) {
|
||||
b.indent(msg, file, "File is new")
|
||||
b.indent(msg, file, Color(terminal.GreenFg, "File is new"))
|
||||
ds.deltas[file] = deltaNew
|
||||
if b.opt.Compare.Size {
|
||||
ds.size[file] = now.getSize(file)
|
||||
}
|
||||
if b.opt.Compare.Modtime {
|
||||
ds.time[file] = now.getTime(file)
|
||||
}
|
||||
if b.opt.Compare.Checksum {
|
||||
ds.hash[file] = now.getHash(file)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -217,7 +288,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newLis
|
||||
}
|
||||
|
||||
// applyDeltas
|
||||
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (changes1, changes2 bool, err error) {
|
||||
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (changes1, changes2 bool, results2to1, results1to2 []Results, queues queues, err error) {
|
||||
path1 := bilib.FsPath(b.fs1)
|
||||
path2 := bilib.FsPath(b.fs2)
|
||||
|
||||
@@ -226,9 +297,17 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
delete1 := bilib.Names{}
|
||||
delete2 := bilib.Names{}
|
||||
handled := bilib.Names{}
|
||||
renameSkipped := bilib.Names{}
|
||||
deletedonboth := bilib.Names{}
|
||||
skippedDirs1 := newFileList()
|
||||
skippedDirs2 := newFileList()
|
||||
b.renames = renames{}
|
||||
|
||||
ctxMove := b.opt.setDryRun(ctx)
|
||||
|
||||
// update AliasMap for deleted files, as march does not know about them
|
||||
b.updateAliases(ctx, ds1, ds2)
|
||||
|
||||
// efficient isDir check
|
||||
// we load the listing just once and store only the dirs
|
||||
dirs1, dirs1Err := b.listDirsOnly(1)
|
||||
@@ -259,14 +338,32 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
ctxCheck, filterCheck := filter.AddConfig(ctxNew)
|
||||
|
||||
for _, file := range ds1.sort() {
|
||||
alias := b.aliases.Alias(file)
|
||||
d1 := ds1.deltas[file]
|
||||
if d1.is(deltaOther) {
|
||||
d2 := ds2.deltas[file]
|
||||
d2, in2 := ds2.deltas[file]
|
||||
file2 := file
|
||||
if !in2 && file != alias {
|
||||
d2 = ds2.deltas[alias]
|
||||
file2 = alias
|
||||
}
|
||||
if d2.is(deltaOther) {
|
||||
if err := filterCheck.AddFile(file); err != nil {
|
||||
fs.Debugf(nil, "Non-critical error adding file to list of potential conflicts to check: %s", err)
|
||||
// if size or hash differ, skip this, as we already know they're not equal
|
||||
if (b.opt.Compare.Size && sizeDiffers(ds1.size[file], ds2.size[file2])) ||
|
||||
(b.opt.Compare.Checksum && hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) {
|
||||
fs.Debugf(file, "skipping equality check as size/hash definitely differ")
|
||||
} else {
|
||||
fs.Debugf(nil, "Added file to list of potential conflicts to check: %s", file)
|
||||
checkit := func(filename string) {
|
||||
if err := filterCheck.AddFile(filename); err != nil {
|
||||
fs.Debugf(nil, "Non-critical error adding file to list of potential conflicts to check: %s", err)
|
||||
} else {
|
||||
fs.Debugf(nil, "Added file to list of potential conflicts to check: %s", filename)
|
||||
}
|
||||
}
|
||||
checkit(file)
|
||||
if file != alias {
|
||||
checkit(alias)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -276,12 +373,17 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)
|
||||
|
||||
for _, file := range ds1.sort() {
|
||||
alias := b.aliases.Alias(file)
|
||||
p1 := path1 + file
|
||||
p2 := path2 + file
|
||||
p2 := path2 + alias
|
||||
d1 := ds1.deltas[file]
|
||||
|
||||
if d1.is(deltaOther) {
|
||||
d2, in2 := ds2.deltas[file]
|
||||
// try looking under alternate name
|
||||
if !in2 && file != alias {
|
||||
d2, in2 = ds2.deltas[alias]
|
||||
}
|
||||
if !in2 {
|
||||
b.indent("Path1", p2, "Queue copy to Path2")
|
||||
copy1to2.Add(file)
|
||||
@@ -293,30 +395,46 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
b.indent("!WARNING", file, "New or changed in both paths")
|
||||
|
||||
//if files are identical, leave them alone instead of renaming
|
||||
if dirs1.has(file) && dirs2.has(file) {
|
||||
fs.Debugf(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
|
||||
if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
|
||||
fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
|
||||
ls1.getPut(file, skippedDirs1)
|
||||
ls2.getPut(file, skippedDirs2)
|
||||
b.debugFn(file, func() {
|
||||
b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, ls2 has name?: %v", file, ls1.has(b.DebugName), ls2.has(b.DebugName)))
|
||||
})
|
||||
} else {
|
||||
equal := matches.Has(file)
|
||||
if !equal {
|
||||
equal = matches.Has(alias)
|
||||
}
|
||||
if equal {
|
||||
fs.Infof(nil, "Files are equal! Skipping: %s", file)
|
||||
if ciCheck.FixCase && file != alias {
|
||||
// the content is equal but filename still needs to be FixCase'd, so copy1to2
|
||||
// the Path1 version is deemed "correct" in this scenario
|
||||
fs.Infof(alias, "Files are equal but will copy anyway to fix case to %s", file)
|
||||
copy1to2.Add(file)
|
||||
} else if b.opt.Compare.Modtime && timeDiffers(ctx, ls1.getTime(ls1.getTryAlias(file, alias)), ls2.getTime(ls2.getTryAlias(file, alias)), b.fs1, b.fs2) {
|
||||
fs.Infof(file, "Files are equal but will copy anyway to update modtime (will not rename)")
|
||||
if ls1.getTime(ls1.getTryAlias(file, alias)).Before(ls2.getTime(ls2.getTryAlias(file, alias))) {
|
||||
// Path2 is newer
|
||||
b.indent("Path2", p1, "Queue copy to Path1")
|
||||
copy2to1.Add(ls2.getTryAlias(file, alias))
|
||||
} else {
|
||||
// Path1 is newer
|
||||
b.indent("Path1", p2, "Queue copy to Path2")
|
||||
copy1to2.Add(ls1.getTryAlias(file, alias))
|
||||
}
|
||||
} else {
|
||||
fs.Infof(nil, "Files are equal! Skipping: %s", file)
|
||||
renameSkipped.Add(file)
|
||||
renameSkipped.Add(alias)
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(nil, "Files are NOT equal: %s", file)
|
||||
b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
|
||||
if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
|
||||
err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
|
||||
b.critical = true
|
||||
err = b.resolve(ctxMove, path1, path2, file, alias, &renameSkipped, ©1to2, ©2to1, ds1, ds2)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
b.indent("!Path1", p2+"..path1", "Queue copy to Path2")
|
||||
copy1to2.Add(file + "..path1")
|
||||
|
||||
b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
|
||||
if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, file+"..path2", file); err != nil {
|
||||
err = fmt.Errorf("path2 rename failed for %s: %w", file, err)
|
||||
return
|
||||
}
|
||||
b.indent("!Path2", p1+"..path2", "Queue copy to Path1")
|
||||
copy2to1.Add(file + "..path2")
|
||||
}
|
||||
}
|
||||
handled.Add(file)
|
||||
@@ -324,24 +442,37 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
} else {
|
||||
// Path1 deleted
|
||||
d2, in2 := ds2.deltas[file]
|
||||
// try looking under alternate name
|
||||
fs.Debugf(file, "alias: %s, in2: %v", alias, in2)
|
||||
if !in2 && file != alias {
|
||||
fs.Debugf(file, "looking for alias: %s", alias)
|
||||
d2, in2 = ds2.deltas[alias]
|
||||
if in2 {
|
||||
fs.Debugf(file, "detected alias: %s", alias)
|
||||
}
|
||||
}
|
||||
if !in2 {
|
||||
b.indent("Path2", p2, "Queue delete")
|
||||
delete2.Add(file)
|
||||
copy1to2.Add(file)
|
||||
} else if d2.is(deltaOther) {
|
||||
b.indent("Path2", p1, "Queue copy to Path1")
|
||||
copy2to1.Add(file)
|
||||
handled.Add(file)
|
||||
} else if d2.is(deltaDeleted) {
|
||||
handled.Add(file)
|
||||
deletedonboth.Add(file)
|
||||
deletedonboth.Add(alias)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, file := range ds2.sort() {
|
||||
p1 := path1 + file
|
||||
alias := b.aliases.Alias(file)
|
||||
p1 := path1 + alias
|
||||
d2 := ds2.deltas[file]
|
||||
|
||||
if handled.Has(file) {
|
||||
if handled.Has(file) || handled.Has(alias) {
|
||||
continue
|
||||
}
|
||||
if d2.is(deltaOther) {
|
||||
@@ -351,58 +482,68 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
// Deleted
|
||||
b.indent("Path1", p1, "Queue delete")
|
||||
delete1.Add(file)
|
||||
copy2to1.Add(file)
|
||||
}
|
||||
}
|
||||
|
||||
// Do the batch operation
|
||||
if copy2to1.NotEmpty() {
|
||||
if copy2to1.NotEmpty() && !b.InGracefulShutdown {
|
||||
changes1 = true
|
||||
b.indent("Path2", "Path1", "Do queued copies to")
|
||||
err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
|
||||
if err != nil {
|
||||
ctx = b.setBackupDir(ctx, 1)
|
||||
results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
|
||||
|
||||
// retries, if any
|
||||
results2to1, err = b.retryFastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1", results2to1, err)
|
||||
|
||||
if !b.InGracefulShutdown && err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, "make")
|
||||
b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, &results2to1, "make")
|
||||
}
|
||||
|
||||
if copy1to2.NotEmpty() {
|
||||
if copy1to2.NotEmpty() && !b.InGracefulShutdown {
|
||||
changes2 = true
|
||||
b.indent("Path1", "Path2", "Do queued copies to")
|
||||
err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
|
||||
if err != nil {
|
||||
ctx = b.setBackupDir(ctx, 2)
|
||||
results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
|
||||
|
||||
// retries, if any
|
||||
results1to2, err = b.retryFastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2", results1to2, err)
|
||||
|
||||
if !b.InGracefulShutdown && err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, "make")
|
||||
b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, &results1to2, "make")
|
||||
}
|
||||
|
||||
if delete1.NotEmpty() {
|
||||
changes1 = true
|
||||
b.indent("", "Path1", "Do queued deletes on")
|
||||
err = b.fastDelete(ctx, b.fs1, delete1, "delete1")
|
||||
if err != nil {
|
||||
if delete1.NotEmpty() && !b.InGracefulShutdown {
|
||||
if err = b.saveQueue(delete1, "delete1"); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, "remove")
|
||||
b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, &results2to1, "remove")
|
||||
}
|
||||
|
||||
if delete2.NotEmpty() {
|
||||
changes2 = true
|
||||
b.indent("", "Path2", "Do queued deletes on")
|
||||
err = b.fastDelete(ctx, b.fs2, delete2, "delete2")
|
||||
if err != nil {
|
||||
if delete2.NotEmpty() && !b.InGracefulShutdown {
|
||||
if err = b.saveQueue(delete2, "delete2"); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, "remove")
|
||||
b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, &results1to2, "remove")
|
||||
}
|
||||
|
||||
queues.copy1to2 = copy1to2
|
||||
queues.copy2to1 = copy2to1
|
||||
queues.renameSkipped = renameSkipped
|
||||
queues.deletedonboth = deletedonboth
|
||||
queues.skippedDirs1 = skippedDirs1
|
||||
queues.skippedDirs2 = skippedDirs2
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -424,3 +565,65 @@ func (ds *deltaSet) excessDeletes() bool {
|
||||
maxDelete, ds.deleted, ds.oldCount, ds.msg, quotePath(bilib.FsPath(ds.fs)))
|
||||
return true
|
||||
}
|
||||
|
||||
// normally we build the AliasMap from march results,
|
||||
// however, march does not know about deleted files, so need to manually check them for aliases
|
||||
func (b *bisyncRun) updateAliases(ctx context.Context, ds1, ds2 *deltaSet) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
// skip if not needed
|
||||
if ci.NoUnicodeNormalization && !ci.IgnoreCaseSync && !b.fs1.Features().CaseInsensitive && !b.fs2.Features().CaseInsensitive {
|
||||
return
|
||||
}
|
||||
if ds1.deleted < 1 && ds2.deleted < 1 {
|
||||
return
|
||||
}
|
||||
|
||||
fs.Debugf(nil, "Updating AliasMap")
|
||||
|
||||
transform := func(s string) string {
|
||||
if !ci.NoUnicodeNormalization {
|
||||
s = norm.NFC.String(s)
|
||||
}
|
||||
// note: march only checks the dest, but we check both here
|
||||
if ci.IgnoreCaseSync || b.fs1.Features().CaseInsensitive || b.fs2.Features().CaseInsensitive {
|
||||
s = strings.ToLower(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
delMap1 := map[string]string{} // [transformedname]originalname
|
||||
delMap2 := map[string]string{} // [transformedname]originalname
|
||||
fullMap1 := map[string]string{} // [transformedname]originalname
|
||||
fullMap2 := map[string]string{} // [transformedname]originalname
|
||||
|
||||
for _, name := range ls1.list {
|
||||
fullMap1[transform(name)] = name
|
||||
}
|
||||
for _, name := range ls2.list {
|
||||
fullMap2[transform(name)] = name
|
||||
}
|
||||
|
||||
addDeletes := func(ds *deltaSet, delMap, fullMap map[string]string) {
|
||||
for _, file := range ds.sort() {
|
||||
d := ds.deltas[file]
|
||||
if d.is(deltaDeleted) {
|
||||
delMap[transform(file)] = file
|
||||
fullMap[transform(file)] = file
|
||||
}
|
||||
}
|
||||
}
|
||||
addDeletes(ds1, delMap1, fullMap1)
|
||||
addDeletes(ds2, delMap2, fullMap2)
|
||||
|
||||
addAliases := func(delMap, fullMap map[string]string) {
|
||||
for transformedname, name := range delMap {
|
||||
matchedName, found := fullMap[transformedname]
|
||||
if found && name != matchedName {
|
||||
fs.Debugf(name, "adding alias %s", matchedName)
|
||||
b.aliases.Add(name, matchedName)
|
||||
}
|
||||
}
|
||||
}
|
||||
addAliases(delMap1, fullMap2)
|
||||
addAliases(delMap2, fullMap1)
|
||||
}
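// The transform above folds the two kinds of name variants bisync treats as aliases:
// Unicode normalization form and letter case. A minimal sketch with sample names, assuming
// a case-insensitive remote so the lowercase fold applies (hypothetical helper):
func exampleAlias() {
	nfd := "Cafe\u0301 Menu.txt" // "é" spelled as 'e' plus a combining acute
	nfc := "café menu.txt"       // precomposed "é", lower case
	key1 := strings.ToLower(norm.NFC.String(nfd))
	key2 := strings.ToLower(norm.NFC.String(nfc))
	fs.Debugf(nil, "same key: %v", key1 == key2) // true, so the two names would be recorded as aliases
}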
|
||||
@@ -10,7 +10,7 @@ func makeHelp(help string) string {
|
||||
"|", "`",
|
||||
"{MAXDELETE}", strconv.Itoa(DefaultMaxDelete),
|
||||
"{CHECKFILE}", DefaultCheckFilename,
|
||||
"{WORKDIR}", DefaultWorkdir,
|
||||
// "{WORKDIR}", DefaultWorkdir,
|
||||
)
|
||||
return replacer.Replace(help)
|
||||
}
|
||||
@@ -37,7 +37,9 @@ var rcHelp = makeHelp(`This takes the following parameters
|
||||
- ignoreListingChecksum - Do not use checksums for listings
|
||||
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
|
||||
Use at your own risk!
|
||||
- workdir - server directory for history files (default: {WORKDIR})
|
||||
- workdir - server directory for history files (default: |~/.cache/rclone/bisync|)
|
||||
- backupdir1 - --backup-dir for Path1. Must be a non-overlapping path on the same remote.
|
||||
- backupdir2 - --backup-dir for Path2. Must be a non-overlapping path on the same remote.
|
||||
- noCleanup - retain working files
|
||||
|
||||
See [bisync command help](https://rclone.org/commands/rclone_bisync/)
|
||||
@@ -54,5 +56,10 @@ On each successive run it will:
|
||||
Changes include |New|, |Newer|, |Older|, and |Deleted| files.
|
||||
- Propagate changes on Path1 to Path2, and vice-versa.
|
||||
|
||||
Bisync is **in beta** and is considered an **advanced command**, so use with care.
|
||||
Make sure you have read and understood the entire [manual](https://rclone.org/bisync)
|
||||
(especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using,
|
||||
or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).
|
||||
|
||||
See [full bisync description](https://rclone.org/bisync/) for details.
|
||||
`)
|
||||
|
||||
@@ -5,18 +5,23 @@ package bisync
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// ListingHeader defines first line of a listing
|
||||
@@ -32,7 +37,7 @@ const ListingHeader = "# bisync listing v1 from"
|
||||
// id: "-" (reserved)
|
||||
const lineFormat = "%s %8d %s %s %s %q\n"
|
||||
|
||||
var lineRegex = regexp.MustCompile(`^(\S) +(\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d{9}[+-]\d{4}) (".+")$`)
|
||||
var lineRegex = regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d{9}[+-]\d{4}) (".+")$`)
|
||||
|
||||
// timeFormat defines time format used in listings
|
||||
const timeFormat = "2006-01-02T15:04:05.000000000-0700"
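// Example of what the widened lineRegex above accepts: the size group changed from \d+ to
// -?\d+, so a listing line carrying a negative size (as Google Docs report) now matches.
// Sample line with made-up values, field order assumed from lineFormat:
//   -       -1 md5:0cc175b9c0f1b6a831c399e269772661 - 2023-11-02T20:22:45.552679442+0000 "My Google Doc"
// lineRegex.MatchString on that line is true with the new pattern and false with the old
// one, because only the new pattern allows a leading minus on the size field.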
@@ -65,27 +70,73 @@ func newFileList() *fileList {
|
||||
}
|
||||
|
||||
func (ls *fileList) empty() bool {
|
||||
if ls == nil {
|
||||
return true
|
||||
}
|
||||
return len(ls.list) == 0
|
||||
}
|
||||
|
||||
func (ls *fileList) has(file string) bool {
|
||||
if file == "" {
|
||||
fs.Debugf(nil, "called ls.has() with blank string")
|
||||
return false
|
||||
}
|
||||
_, found := ls.info[file]
|
||||
if !found {
|
||||
//try unquoting
|
||||
file, _ = strconv.Unquote(`"` + file + `"`)
|
||||
_, found = ls.info[file]
|
||||
}
|
||||
return found
|
||||
}
|
||||
|
||||
func (ls *fileList) get(file string) *fileInfo {
|
||||
return ls.info[file]
|
||||
info, found := ls.info[file]
|
||||
if !found {
|
||||
//try unquoting
|
||||
file, _ = strconv.Unquote(`"` + file + `"`)
|
||||
info = ls.info[fmt.Sprint(file)]
|
||||
}
|
||||
return info
|
||||
}
|
||||
|
||||
func (ls *fileList) put(file string, size int64, time time.Time, hash, id string, flags string) {
|
||||
// copy file from ls to dest
|
||||
func (ls *fileList) getPut(file string, dest *fileList) {
|
||||
f := ls.get(file)
|
||||
dest.put(file, f.size, f.time, f.hash, f.id, f.flags)
|
||||
}
|
||||
|
||||
func (ls *fileList) getPutAll(dest *fileList) {
|
||||
for file, f := range ls.info {
|
||||
dest.put(file, f.size, f.time, f.hash, f.id, f.flags)
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *fileList) remove(file string) {
|
||||
if ls.has(file) {
|
||||
ls.list = slices.Delete(ls.list, slices.Index(ls.list, file), slices.Index(ls.list, file)+1)
|
||||
delete(ls.info, file)
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *fileList) put(file string, size int64, modtime time.Time, hash, id string, flags string) {
|
||||
fi := ls.get(file)
|
||||
if fi != nil {
|
||||
fi.size = size
|
||||
fi.time = time
|
||||
// if already have higher precision of same time, avoid overwriting it
|
||||
if fi.time != modtime {
|
||||
if modtime.Before(fi.time) && fi.time.Sub(modtime) < time.Second {
|
||||
modtime = fi.time
|
||||
}
|
||||
}
|
||||
fi.time = modtime
|
||||
fi.hash = hash
|
||||
fi.id = id
|
||||
fi.flags = flags
|
||||
} else {
|
||||
fi = &fileInfo{
|
||||
size: size,
|
||||
time: time,
|
||||
time: modtime,
|
||||
hash: hash,
|
||||
id: id,
|
||||
flags: flags,
|
||||
@@ -95,6 +146,15 @@ func (ls *fileList) put(file string, size int64, time time.Time, hash, id string
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *fileList) getTryAlias(file, alias string) string {
|
||||
if ls.has(file) {
|
||||
return file
|
||||
} else if ls.has(alias) {
|
||||
return alias
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ls *fileList) getTime(file string) time.Time {
|
||||
fi := ls.get(file)
|
||||
if fi == nil {
|
||||
@@ -103,6 +163,59 @@ func (ls *fileList) getTime(file string) time.Time {
|
||||
return fi.time
|
||||
}
|
||||
|
||||
func (ls *fileList) getSize(file string) int64 {
|
||||
fi := ls.get(file)
|
||||
if fi == nil {
|
||||
return 0
|
||||
}
|
||||
return fi.size
|
||||
}
|
||||
|
||||
func (ls *fileList) getHash(file string) string {
|
||||
fi := ls.get(file)
|
||||
if fi == nil {
|
||||
return ""
|
||||
}
|
||||
return fi.hash
|
||||
}
|
||||
|
||||
func (b *bisyncRun) fileInfoEqual(file1, file2 string, ls1, ls2 *fileList) bool {
|
||||
equal := true
|
||||
if ls1.isDir(file1) && ls2.isDir(file2) {
|
||||
return equal
|
||||
}
|
||||
if b.opt.Compare.Size {
|
||||
if sizeDiffers(ls1.getSize(file1), ls2.getSize(file2)) {
|
||||
b.indent("ERROR", file1, fmt.Sprintf("Size not equal in listing. Path1: %v, Path2: %v", ls1.getSize(file1), ls2.getSize(file2)))
|
||||
equal = false
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.Modtime {
|
||||
if timeDiffers(b.fctx, ls1.getTime(file1), ls2.getTime(file2), b.fs1, b.fs2) {
|
||||
b.indent("ERROR", file1, fmt.Sprintf("Modtime not equal in listing. Path1: %v, Path2: %v", ls1.getTime(file1), ls2.getTime(file2)))
|
||||
equal = false
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.Checksum && !ignoreListingChecksum {
|
||||
if hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) {
|
||||
b.indent("ERROR", file1, fmt.Sprintf("Checksum not equal in listing. Path1: %v, Path2: %v", ls1.getHash(file1), ls2.getHash(file2)))
|
||||
equal = false
|
||||
}
|
||||
}
|
||||
return equal
|
||||
}
|
||||
|
||||
// also returns false if not found
|
||||
func (ls *fileList) isDir(file string) bool {
|
||||
fi := ls.get(file)
|
||||
if fi != nil {
|
||||
if fi.flags == "d" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (ls *fileList) beforeOther(other *fileList, file string) bool {
|
||||
thisTime := ls.getTime(file)
|
||||
thatTime := other.getTime(file)
|
||||
@@ -120,12 +233,20 @@ func (ls *fileList) afterTime(file string, time time.Time) bool {
|
||||
return fi.time.After(time)
|
||||
}
|
||||
|
||||
// sort by path name
|
||||
func (ls *fileList) sort() {
|
||||
sort.SliceStable(ls.list, func(i, j int) bool {
|
||||
return ls.list[i] < ls.list[j]
|
||||
})
|
||||
}
|
||||
|
||||
// save will save listing to a file.
|
||||
func (ls *fileList) save(ctx context.Context, listing string) error {
|
||||
file, err := os.Create(listing)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ls.sort()
|
||||
|
||||
hashName := ""
|
||||
if ls.hash != hash.None {
|
||||
@@ -172,7 +293,6 @@ func (ls *fileList) save(ctx context.Context, listing string) error {
|
||||
|
||||
// loadListing will load listing from a file.
|
||||
// The key is the path to the file relative to the Path1/Path2 base.
|
||||
// File size of -1, as for Google Docs, prints a warning and won't be loaded.
|
||||
func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
|
||||
file, err := os.Open(listing)
|
||||
if err != nil {
|
||||
@@ -241,6 +361,24 @@ func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
|
||||
return ls, nil
|
||||
}
|
||||
|
||||
// saveOldListings saves the most recent successful listing, in case we need to rollback on error
|
||||
func (b *bisyncRun) saveOldListings() {
|
||||
b.handleErr(b.listing1, "error saving old Path1 listing", bilib.CopyFileIfExists(b.listing1, b.listing1+"-old"), true, true)
|
||||
b.handleErr(b.listing2, "error saving old Path2 listing", bilib.CopyFileIfExists(b.listing2, b.listing2+"-old"), true, true)
|
||||
}
|
||||
|
||||
// replaceCurrentListings saves both ".lst-new" listings as ".lst"
|
||||
func (b *bisyncRun) replaceCurrentListings() {
|
||||
b.handleErr(b.newListing1, "error replacing Path1 listing", bilib.CopyFileIfExists(b.newListing1, b.listing1), true, true)
|
||||
b.handleErr(b.newListing2, "error replacing Path2 listing", bilib.CopyFileIfExists(b.newListing2, b.listing2), true, true)
|
||||
}
|
||||
|
||||
// revertToOldListings reverts to the most recent successful listing
|
||||
func (b *bisyncRun) revertToOldListings() {
|
||||
b.handleErr(b.listing1, "error reverting to old Path1 listing", bilib.CopyFileIfExists(b.listing1+"-old", b.listing1), true, true)
|
||||
b.handleErr(b.listing2, "error reverting to old Path2 listing", bilib.CopyFileIfExists(b.listing2+"-old", b.listing2), true, true)
|
||||
}
|
||||
|
||||
func parseHash(str string) (string, string, error) {
|
||||
if str == "-" {
|
||||
return "", "", nil
|
||||
@@ -254,71 +392,6 @@ func parseHash(str string) (string, string, error) {
|
||||
return "", "", fmt.Errorf("invalid hash %q", str)
|
||||
}
|
||||
|
||||
// makeListing will produce listing from directory tree and write it to a file
|
||||
func (b *bisyncRun) makeListing(ctx context.Context, f fs.Fs, listing string) (ls *fileList, err error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
depth := ci.MaxDepth
|
||||
hashType := hash.None
|
||||
if !b.opt.IgnoreListingChecksum {
|
||||
// Currently bisync just honors --ignore-listing-checksum
|
||||
// (note that this is different from --ignore-checksum)
|
||||
// TODO add full support for checksums and related flags
|
||||
hashType = f.Hashes().GetOne()
|
||||
}
|
||||
ls = newFileList()
|
||||
ls.hash = hashType
|
||||
var lock sync.Mutex
|
||||
listType := walk.ListObjects
|
||||
if b.opt.CreateEmptySrcDirs {
|
||||
listType = walk.ListAll
|
||||
}
|
||||
err = walk.ListR(ctx, f, "", false, depth, listType, func(entries fs.DirEntries) error {
|
||||
var firstErr error
|
||||
entries.ForObject(func(o fs.Object) {
|
||||
//tr := accounting.Stats(ctx).NewCheckingTransfer(o) // TODO
|
||||
var (
|
||||
hashVal string
|
||||
hashErr error
|
||||
)
|
||||
if hashType != hash.None {
|
||||
hashVal, hashErr = o.Hash(ctx, hashType)
|
||||
if firstErr == nil {
|
||||
firstErr = hashErr
|
||||
}
|
||||
}
|
||||
time := o.ModTime(ctx).In(TZ)
|
||||
id := "" // TODO
|
||||
flags := "-" // "-" for a file and "d" for a directory
|
||||
lock.Lock()
|
||||
ls.put(o.Remote(), o.Size(), time, hashVal, id, flags)
|
||||
lock.Unlock()
|
||||
//tr.Done(ctx, nil) // TODO
|
||||
})
|
||||
if b.opt.CreateEmptySrcDirs {
|
||||
entries.ForDir(func(o fs.Directory) {
|
||||
var (
|
||||
hashVal string
|
||||
)
|
||||
time := o.ModTime(ctx).In(TZ)
|
||||
id := "" // TODO
|
||||
flags := "d" // "-" for a file and "d" for a directory
|
||||
lock.Lock()
|
||||
//record size as 0 instead of -1, so bisync doesn't think it's a google doc
|
||||
ls.put(o.Remote(), 0, time, hashVal, id, flags)
|
||||
lock.Unlock()
|
||||
})
|
||||
}
|
||||
return firstErr
|
||||
})
|
||||
if err == nil {
|
||||
err = ls.save(ctx, listing)
|
||||
}
|
||||
if err != nil {
|
||||
b.abort = true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// checkListing verifies that listing is not empty (unless resynching)
|
||||
func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
|
||||
if b.opt.Resync || !ls.empty() {
|
||||
@@ -376,3 +449,439 @@ func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
|
||||
|
||||
return dirsonly, err
|
||||
}
|
||||
|
||||
// ConvertPrecision returns the Modtime rounded to Dest's precision if lower, otherwise unchanged
|
||||
// Need to use the other fs's precision (if lower) when copying
|
||||
// Note: we need to use Truncate rather than Round so that After() is reliable.
|
||||
// (2023-11-02 20:22:45.552679442 +0000 < UTC 2023-11-02 20:22:45.553 +0000 UTC)
|
||||
func ConvertPrecision(Modtime time.Time, dst fs.Fs) time.Time {
|
||||
DestPrecision := dst.Precision()
|
||||
|
||||
// In case it's wrapping an Fs with lower precision, try unwrapping and use the lowest.
|
||||
if Modtime.Truncate(DestPrecision).After(Modtime.Truncate(fs.UnWrapFs(dst).Precision())) {
|
||||
DestPrecision = fs.UnWrapFs(dst).Precision()
|
||||
}
|
||||
|
||||
if Modtime.After(Modtime.Truncate(DestPrecision)) {
|
||||
return Modtime.Truncate(DestPrecision)
|
||||
}
|
||||
return Modtime
|
||||
}
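// A small sketch of why Truncate (not Round) keeps After() reliable, using the timestamp
// from the comment above and an assumed destination precision of 1ms (hypothetical helper):
func examplePrecision() {
	src := time.Date(2023, 11, 2, 20, 22, 45, 552679442, time.UTC)
	rounded := src.Round(time.Millisecond)      // ...45.553, which is After(src) and breaks comparisons
	truncated := src.Truncate(time.Millisecond) // ...45.552, never after the original
	fs.Debugf(nil, "rounded after src: %v, truncated after src: %v", rounded.After(src), truncated.After(src))
	// rounded after src: true, truncated after src: false
}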
|
||||
// modifyListing will modify the listing based on the results of the sync
|
||||
func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, results []Results, queues queues, is1to2 bool) (err error) {
|
||||
queue := queues.copy2to1
|
||||
direction := "2to1"
|
||||
if is1to2 {
|
||||
queue = queues.copy1to2
|
||||
direction = "1to2"
|
||||
}
|
||||
|
||||
fs.Debugf(nil, "updating %s", direction)
|
||||
prettyprint(results, "results", fs.LogLevelDebug)
|
||||
prettyprint(queue, "queue", fs.LogLevelDebug)
|
||||
|
||||
	srcListing, dstListing := b.getListingNames(is1to2)
	srcList, err := b.loadListing(srcListing)
	if err != nil {
		return fmt.Errorf("cannot read prior listing: %w", err)
	}
	dstList, err := b.loadListing(dstListing)
	if err != nil {
		return fmt.Errorf("cannot read prior listing: %w", err)
	}
	// set list hash type
	if b.opt.Resync && !b.opt.IgnoreListingChecksum {
		if is1to2 {
			srcList.hash = b.opt.Compare.HashType1
			dstList.hash = b.opt.Compare.HashType2
		} else {
			srcList.hash = b.opt.Compare.HashType2
			dstList.hash = b.opt.Compare.HashType1
		}
		if b.opt.Compare.DownloadHash && srcList.hash == hash.None {
			srcList.hash = hash.MD5
		}
		if b.opt.Compare.DownloadHash && dstList.hash == hash.None {
			dstList.hash = hash.MD5
		}
	}

	b.debugFn(b.DebugName, func() {
		var rs ResultsSlice = results
		b.debug(b.DebugName, fmt.Sprintf("modifyListing direction: %s, results has name?: %v", direction, rs.has(b.DebugName)))
		b.debug(b.DebugName, fmt.Sprintf("modifyListing direction: %s, srcList has name?: %v, dstList has name?: %v", direction, srcList.has(b.DebugName), dstList.has(b.DebugName)))
	})

	srcWinners := newFileList()
	dstWinners := newFileList()
	errors := newFileList()
	ctxRecheck, filterRecheck := filter.AddConfig(ctx)

	for _, result := range results {
		if result.Name == "" {
			continue
		}

		if result.AltName != "" {
			b.aliases.Add(result.Name, result.AltName)
		}

		if result.Flags == "d" && !b.opt.CreateEmptySrcDirs {
			continue
		}

		// build src winners list
		if result.IsSrc && result.Src != "" && (result.Winner.Err == nil || result.Flags == "d") {
			srcWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, src), result.Hash, "-", result.Flags)
			prettyprint(result, "winner: copy to src", fs.LogLevelDebug)
		}

		// build dst winners list
		if result.IsWinner && result.Winner.Side != "none" && (result.Winner.Err == nil || result.Flags == "d") {
			dstWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, dst), result.Hash, "-", result.Flags)
			prettyprint(result, "winner: copy to dst", fs.LogLevelDebug)
		}

		// build errors list
		if result.Err != nil || result.Winner.Err != nil {
			errors.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags)
			if err := filterRecheck.AddFile(result.Name); err != nil {
				fs.Debugf(result.Name, "error adding file to recheck filter: %v", err)
			}
		}
	}

	ci := fs.GetConfig(ctx)
	updateLists := func(side string, winners, list *fileList) {
		for _, queueFile := range queue.ToList() {
			if !winners.has(queueFile) && list.has(queueFile) && !errors.has(queueFile) {
				// removals from side
				list.remove(queueFile)
				fs.Debugf(nil, "decision: removed from %s: %v", side, queueFile)
			} else if winners.has(queueFile) {
				// copies to side
				new := winners.get(queueFile)

				// handle normalization
				if side == "dst" {
					alias := b.aliases.Alias(queueFile)
					if alias != queueFile {
						// use the (non-identical) existing name, unless --fix-case
						if ci.FixCase {
							fs.Debugf(direction, "removing %s and adding %s as --fix-case was specified", alias, queueFile)
							list.remove(alias)
						} else {
							fs.Debugf(direction, "casing/unicode difference detected. using %s instead of %s", alias, queueFile)
							queueFile = alias
						}
					}
				}

				list.put(queueFile, new.size, new.time, new.hash, new.id, new.flags)
				fs.Debugf(nil, "decision: copied to %s: %v", side, queueFile)
			} else {
				fs.Debugf(queueFile, "file in queue but missing from %s transfers", side)
				if err := filterRecheck.AddFile(queueFile); err != nil {
					fs.Debugf(queueFile, "error adding file to recheck filter: %v", err)
				}
			}
		}
	}
	updateLists("src", srcWinners, srcList)
	updateLists("dst", dstWinners, dstList)

	// account for "deltaOthers" we handled separately
	if queues.deletedonboth.NotEmpty() {
		for file := range queues.deletedonboth {
			srcList.remove(file)
			dstList.remove(file)
		}
	}
	if b.renames.NotEmpty() && !b.opt.DryRun {
		// renamed on src and copied to dst
		for _, rename := range b.renames {
			srcOldName, srcNewName, dstOldName, dstNewName := rename.getNames(is1to2)
			fs.Debugf(nil, "%s: srcOldName: %v srcNewName: %v dstOldName: %v dstNewName: %v", direction, srcOldName, srcNewName, dstOldName, dstNewName)
			// we'll handle the other side when we go the other direction
			var new *fileInfo
			// we prefer to get the info from the newNamed versions
			// since they were actually copied as opposed to operations.MoveFile()'d.
			// the size/time/hash info is therefore fresher on the renames
			// but we'll settle for the original if we have to.
			if srcList.has(srcNewName) {
				new = srcList.get(srcNewName)
			} else if srcList.has(dstNewName) {
				new = srcList.get(dstNewName)
			} else if srcList.has(srcOldName) {
				new = srcList.get(srcOldName)
			} else {
				// something's odd, so let's recheck
				if err := filterRecheck.AddFile(srcOldName); err != nil {
					fs.Debugf(srcOldName, "error adding file to recheck filter: %v", err)
				}
			}
			if srcNewName != "" { // if it was renamed and not deleted
				srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
				dstList.put(srcNewName, new.size, ConvertPrecision(new.time, src), new.hash, new.id, new.flags)
			}
			if srcNewName != srcOldName {
				srcList.remove(srcOldName)
			}
			if srcNewName != dstOldName {
				dstList.remove(dstOldName)
			}
		}
	}

	// recheck the ones we skipped because they were equal
	// we never got their info because they were never synced.
	// TODO: add flag to skip this? (since it re-lists)
	if queues.renameSkipped.NotEmpty() {
		skippedList := queues.renameSkipped.ToList()
		for _, file := range skippedList {
			if err := filterRecheck.AddFile(file); err != nil {
				fs.Debugf(file, "error adding file to recheck filter: %v", err)
			}
		}
	}
	// skipped dirs -- nothing to recheck, just add them
	// (they are not necessarily there already, if they are new)
	path1List := srcList
	path2List := dstList
	if !is1to2 {
		path1List = dstList
		path2List = srcList
	}
	if !queues.skippedDirs1.empty() {
		queues.skippedDirs1.getPutAll(path1List)
	}
	if !queues.skippedDirs2.empty() {
		queues.skippedDirs2.getPutAll(path2List)
	}

	if filterRecheck.HaveFilesFrom() {
		// also include any aliases
		recheckFiles := filterRecheck.Files()
		for recheckFile := range recheckFiles {
			alias := b.aliases.Alias(recheckFile)
			if recheckFile != alias {
				if err := filterRecheck.AddFile(alias); err != nil {
					fs.Debugf(alias, "error adding file to recheck filter: %v", err)
				}
			}
		}
		b.recheck(ctxRecheck, src, dst, srcList, dstList, is1to2)
	}

	if b.InGracefulShutdown {
		var toKeep []string
		var toRollback []string
		fs.Debugf(direction, "stats for %s", direction)
		trs := accounting.Stats(ctx).Transferred()
		for _, tr := range trs {
			b.debugFn(tr.Name, func() {
				prettyprint(tr, tr.Name, fs.LogLevelInfo)
			})
			if tr.Error == nil && tr.Bytes > 0 || tr.Size <= 0 {
				prettyprint(tr, "keeping: "+tr.Name, fs.LogLevelDebug)
				toKeep = append(toKeep, tr.Name)
			}
		}
		// Dirs (for the unlikely event that the shutdown was triggered post-sync during syncEmptyDirs)
		for _, r := range results {
			if r.Origin == "syncEmptyDirs" {
				if srcWinners.has(r.Name) || dstWinners.has(r.Name) {
					toKeep = append(toKeep, r.Name)
					fs.Infof(r.Name, "keeping empty dir")
				}
			}
		}
		oldSrc, oldDst := b.getOldLists(is1to2)
		prettyprint(oldSrc.list, "oldSrc", fs.LogLevelDebug)
		prettyprint(oldDst.list, "oldDst", fs.LogLevelDebug)
		prettyprint(srcList.list, "srcList", fs.LogLevelDebug)
		prettyprint(dstList.list, "dstList", fs.LogLevelDebug)
		combinedList := Concat(oldSrc.list, oldDst.list, srcList.list, dstList.list)
		for _, f := range combinedList {
			if !slices.Contains(toKeep, f) && !slices.Contains(toKeep, b.aliases.Alias(f)) && !b.opt.DryRun {
				toRollback = append(toRollback, f)
			}
		}
		b.prepareRollback(toRollback, srcList, dstList, is1to2)
		prettyprint(oldSrc.list, "oldSrc", fs.LogLevelDebug)
		prettyprint(oldDst.list, "oldDst", fs.LogLevelDebug)
		prettyprint(srcList.list, "srcList", fs.LogLevelDebug)
		prettyprint(dstList.list, "dstList", fs.LogLevelDebug)

		// clear stats so we only do this once
		accounting.MaxCompletedTransfers = 0
		accounting.Stats(ctx).PruneTransfers()
	}

	if b.DebugName != "" {
		b.debug(b.DebugName, fmt.Sprintf("%s pre-save srcList has it?: %v", direction, srcList.has(b.DebugName)))
		b.debug(b.DebugName, fmt.Sprintf("%s pre-save dstList has it?: %v", direction, dstList.has(b.DebugName)))
	}
	// update files
	err = srcList.save(ctx, srcListing)
	b.handleErr(srcList, "error saving srcList from modifyListing", err, true, true)
	err = dstList.save(ctx, dstListing)
	b.handleErr(dstList, "error saving dstList from modifyListing", err, true, true)

	return err
}

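// The recheck queue above uses rclone's filter "files from" mechanism:
// filter.AddConfig(ctx) returns a child context carrying its own *filter.Filter,
// AddFile queues individual paths, and HaveFilesFrom()/Files() report what has
// been queued. A minimal sketch of the same pattern in isolation (the path and
// caller here are hypothetical, not part of this change):
//
//	ctxRecheck, filterRecheck := filter.AddConfig(ctx)
//	if err := filterRecheck.AddFile("some/path.txt"); err != nil {
//		fs.Debugf("some/path.txt", "error adding file to recheck filter: %v", err)
//	}
//	if filterRecheck.HaveFilesFrom() {
//		// listings done with ctxRecheck now only visit the queued files
//	}
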
// recheck the ones we're not sure about
func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList, dstList *fileList, is1to2 bool) {
	var srcObjs []fs.Object
	var dstObjs []fs.Object
	var resolved []string
	var toRollback []string

	if err := operations.ListFn(ctxRecheck, src, func(obj fs.Object) {
		srcObjs = append(srcObjs, obj)
	}); err != nil {
		fs.Debugf(src, "error rechecking src obj: %v", err)
	}
	if err := operations.ListFn(ctxRecheck, dst, func(obj fs.Object) {
		dstObjs = append(dstObjs, obj)
	}); err != nil {
		fs.Debugf(dst, "error rechecking dst obj: %v", err)
	}

	putObj := func(obj fs.Object, list *fileList) {
		hashVal := ""
		if !b.opt.IgnoreListingChecksum {
			hashType := list.hash
			if hashType != hash.None {
				hashVal, _ = obj.Hash(ctxRecheck, hashType)
			}
			hashVal, _ = tryDownloadHash(ctxRecheck, obj, hashVal)
		}
		var modtime time.Time
		if b.opt.Compare.Modtime {
			modtime = obj.ModTime(ctxRecheck).In(TZ)
		}
		list.put(obj.Remote(), obj.Size(), modtime, hashVal, "-", "-")
	}

	for _, srcObj := range srcObjs {
		fs.Debugf(srcObj, "rechecking")
		for _, dstObj := range dstObjs {
			if srcObj.Remote() == dstObj.Remote() || srcObj.Remote() == b.aliases.Alias(dstObj.Remote()) {
				// note: unlike Equal(), WhichEqual() does not update the modtime in dest if sums match but modtimes don't.
				if b.opt.DryRun || WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) {
					putObj(srcObj, srcList)
					putObj(dstObj, dstList)
					resolved = append(resolved, srcObj.Remote())
				} else {
					fs.Infof(srcObj, "files not equal on recheck: %v %v", srcObj, dstObj)
				}
			}
		}
		// if srcObj not resolved by now (either because no dstObj match or files not equal),
		// roll it back to old version, so it gets retried next time.
		// skip and error during --resync, as rollback is not possible
		if !slices.Contains(resolved, srcObj.Remote()) && !b.opt.DryRun {
			if b.opt.Resync {
				err = errors.New("no dstObj match or files not equal")
				b.handleErr(srcObj, "Unable to rollback during --resync", err, true, false)
			} else {
				toRollback = append(toRollback, srcObj.Remote())
			}
		}
	}
	if len(toRollback) > 0 {
		srcListing, dstListing := b.getListingNames(is1to2)
		oldSrc, err := b.loadListing(srcListing + "-old")
		b.handleErr(oldSrc, "error loading old src listing", err, true, true)
		oldDst, err := b.loadListing(dstListing + "-old")
		b.handleErr(oldDst, "error loading old dst listing", err, true, true)
		if b.critical {
			return
		}

		for _, item := range toRollback {
			b.rollback(item, oldSrc, srcList)
			b.rollback(item, oldDst, dstList)
		}
	}
}

// getListingNames returns the (srcListing, dstListing) file names for the
// direction indicated by is1to2.
func (b *bisyncRun) getListingNames(is1to2 bool) (srcListing string, dstListing string) {
	if is1to2 {
		return b.listing1, b.listing2
	}
	return b.listing2, b.listing1
}

// rollback restores item (or its alias) in newList from oldList.
// If the old listing has no record of it, it is removed from newList instead.
func (b *bisyncRun) rollback(item string, oldList, newList *fileList) {
	alias := b.aliases.Alias(item)
	if oldList.has(item) {
		oldList.getPut(item, newList)
		fs.Debugf(nil, "adding to newlist: %s", item)
	} else if oldList.has(alias) {
		oldList.getPut(alias, newList)
		fs.Debugf(nil, "adding to newlist: %s", alias)
	} else {
		fs.Debugf(nil, "removing from newlist: %s (has it?: %v)", item, newList.has(item))
		prettyprint(newList.list, "newList", fs.LogLevelDebug)
		newList.remove(item)
		newList.remove(alias)
	}
}

// prepareRollback reverts the given items in srcList and dstList to their
// prior ("-old") listing state.
func (b *bisyncRun) prepareRollback(toRollback []string, srcList, dstList *fileList, is1to2 bool) {
	if len(toRollback) > 0 {
		oldSrc, oldDst := b.getOldLists(is1to2)
		if b.critical {
			return
		}

		fs.Debugf("new lists", "src: (%v), dest: (%v)", len(srcList.list), len(dstList.list))

		for _, item := range toRollback {
			b.debugFn(item, func() {
				b.debug(item, fmt.Sprintf("pre-rollback oldSrc has it?: %v", oldSrc.has(item)))
				b.debug(item, fmt.Sprintf("pre-rollback oldDst has it?: %v", oldDst.has(item)))
				b.debug(item, fmt.Sprintf("pre-rollback srcList has it?: %v", srcList.has(item)))
				b.debug(item, fmt.Sprintf("pre-rollback dstList has it?: %v", dstList.has(item)))
			})
			b.rollback(item, oldSrc, srcList)
			b.rollback(item, oldDst, dstList)
			b.debugFn(item, func() {
				b.debug(item, fmt.Sprintf("post-rollback oldSrc has it?: %v", oldSrc.has(item)))
				b.debug(item, fmt.Sprintf("post-rollback oldDst has it?: %v", oldDst.has(item)))
				b.debug(item, fmt.Sprintf("post-rollback srcList has it?: %v", srcList.has(item)))
				b.debug(item, fmt.Sprintf("post-rollback dstList has it?: %v", dstList.has(item)))
			})
		}
	}
}

// getOldLists loads the prior-run ("-old") listings for both sides.
func (b *bisyncRun) getOldLists(is1to2 bool) (*fileList, *fileList) {
	srcListing, dstListing := b.getListingNames(is1to2)
	oldSrc, err := b.loadListing(srcListing + "-old")
	b.handleErr(oldSrc, "error loading old src listing", err, true, true)
	oldDst, err := b.loadListing(dstListing + "-old")
	b.handleErr(oldDst, "error loading old dst listing", err, true, true)
	fs.Debugf("get old lists", "is1to2: %v, oldsrc: %s (%v), olddest: %s (%v)", is1to2, srcListing+"-old", len(oldSrc.list), dstListing+"-old", len(oldDst.list))
	return oldSrc, oldDst
}

// Concat returns a new slice concatenating the passed in slices.
func Concat[S ~[]E, E any](ss ...S) S {
	size := 0
	for _, s := range ss {
		size += len(s)
		if size < 0 {
			panic("len out of range")
		}
	}
	newslice := slices.Grow[S](nil, size)
	for _, s := range ss {
		newslice = append(newslice, s...)
	}
	return newslice
}

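Concat is used above to flatten the old and new listings into a single slice of names. A minimal illustration of the helper on its own (the values below are hypothetical, not taken from the change):

	names := Concat([]string{"a.txt"}, []string{"b.txt", "c.txt"}, nil)
	// names == []string{"a.txt", "b.txt", "c.txt"}; nil slices contribute nothing
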
154	cmd/bisync/lockfile.go	Normal file
@@ -0,0 +1,154 @@
package bisync

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strconv"
	"sync"
	"time"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/terminal"
)

const basicallyforever = 200 * 365 * 24 * time.Hour

// stopRenewal stops the background lock-renewal goroutine started by startLockRenewal.
var stopRenewal func()

// data is the payload encoded as JSON into the lock file.
var data = struct {
	Session     string
	PID         string
	TimeRenewed time.Time
	TimeExpires time.Time
}{}

// setLockFile creates the session lock file (unless --dry-run), refusing to
// proceed if a valid, unexpired lock from a prior run is found.
func (b *bisyncRun) setLockFile() error {
	b.lockFile = ""
	b.setLockFileExpiration()
	if !b.opt.DryRun {
		b.lockFile = b.basePath + ".lck"
		if bilib.FileExists(b.lockFile) {
			if !b.lockFileIsExpired() {
				errTip := Color(terminal.MagentaFg, "Tip: this indicates that another bisync run (of these same paths) either is still running or was interrupted before completion. \n")
				errTip += Color(terminal.MagentaFg, "If you're SURE you want to override this safety feature, you can delete the lock file with the following command, then run bisync again: \n")
				errTip += fmt.Sprintf(Color(terminal.HiRedFg, "rclone deletefile \"%s\""), b.lockFile)
				return fmt.Errorf(Color(terminal.RedFg, "prior lock file found: %s \n")+errTip, Color(terminal.HiYellowFg, b.lockFile))
			}
		}

		pidStr := []byte(strconv.Itoa(os.Getpid()))
		if err = os.WriteFile(b.lockFile, pidStr, bilib.PermSecure); err != nil {
			return fmt.Errorf(Color(terminal.RedFg, "cannot create lock file: %s: %w"), b.lockFile, err)
		}
		fs.Debugf(nil, "Lock file created: %s", b.lockFile)
		b.renewLockFile()
		stopRenewal = b.startLockRenewal()
	}
	return nil
}

// removeLockFile stops lock renewal and deletes the lock file, if one was created.
func (b *bisyncRun) removeLockFile() {
	if b.lockFile != "" {
		stopRenewal()
		errUnlock := os.Remove(b.lockFile)
		if errUnlock == nil {
			fs.Debugf(nil, "Lock file removed: %s", b.lockFile)
		} else if err == nil {
			err = errUnlock
		} else {
			fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, errUnlock)
		}
		b.lockFile = "" // block removing it again
	}
}

// setLockFileExpiration enforces a minimum --max-lock of 2 minutes; a value of
// 0 (or less) disables expiration.
func (b *bisyncRun) setLockFileExpiration() {
	if b.opt.MaxLock > 0 && b.opt.MaxLock < 2*time.Minute {
		fs.Logf(nil, Color(terminal.YellowFg, "--max-lock cannot be shorter than 2 minutes (unless 0.) Changing --max-lock from %v to %v"), b.opt.MaxLock, 2*time.Minute)
		b.opt.MaxLock = 2 * time.Minute
	} else if b.opt.MaxLock <= 0 {
		b.opt.MaxLock = basicallyforever
	}
}

// renewLockFile rewrites the lock file as JSON with a fresh expiration time.
func (b *bisyncRun) renewLockFile() {
	if b.lockFile != "" && bilib.FileExists(b.lockFile) {

		data.Session = b.basePath
		data.PID = strconv.Itoa(os.Getpid())
		data.TimeRenewed = time.Now()
		data.TimeExpires = time.Now().Add(b.opt.MaxLock)

		// save data file
		df, err := os.Create(b.lockFile)
		b.handleErr(b.lockFile, "error renewing lock file", err, true, true)
		b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(data), true, true)
		b.handleErr(b.lockFile, "error closing lock file", df.Close(), true, true)
		if b.opt.MaxLock < basicallyforever {
			fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. New expiration: %v"), b.opt.MaxLock, data.TimeExpires)
		}
	}
}

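// For reference, the JSON written by renewLockFile above follows the shape of
// the data struct; an illustrative (made-up) example of the lock file contents:
//
//	{"Session":"/path/to/basepath","PID":"12345","TimeRenewed":"2024-01-01T12:00:00Z","TimeExpires":"2024-01-01T12:05:00Z"}
//
// lockFileIsExpired below decodes the same struct to decide whether a prior
// lock can be ignored.
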
// lockFileIsExpired reports whether an existing lock file has passed its
// expiration time; if so, the current listings are marked as failed.
func (b *bisyncRun) lockFileIsExpired() bool {
	if b.lockFile != "" && bilib.FileExists(b.lockFile) {
		rdf, err := os.Open(b.lockFile)
		b.handleErr(b.lockFile, "error reading lock file", err, true, true)
		dec := json.NewDecoder(rdf)
		for {
			if err := dec.Decode(&data); err == io.EOF {
				break
			}
		}
		b.handleErr(b.lockFile, "error closing file", rdf.Close(), true, true)
		if !data.TimeExpires.IsZero() && data.TimeExpires.Before(time.Now()) {
			fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), data.TimeExpires)
			markFailed(b.listing1) // listing is untrusted so force revert to prior (if --recover) or create new ones (if --resync)
			markFailed(b.listing2)
			return true
		}
		fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. (%v from now)"), data.TimeExpires, time.Since(data.TimeExpires).Abs().Round(time.Second))
		prettyprint(data, "Lockfile info", fs.LogLevelInfo)
	}
	return false
}

// startLockRenewal renews the lockfile every --max-lock minus one minute.
//
// It returns a func which should be called to stop the renewal.
func (b *bisyncRun) startLockRenewal() func() {
	if b.opt.MaxLock <= 0 || b.opt.MaxLock >= basicallyforever || b.lockFile == "" {
		return func() {}
	}
	stopLockRenewal := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		ticker := time.NewTicker(b.opt.MaxLock - time.Minute)
		for {
			select {
			case <-ticker.C:
				b.renewLockFile()
			case <-stopLockRenewal:
				ticker.Stop()
				return
			}
		}
	}()
	return func() {
		close(stopLockRenewal)
		wg.Wait()
	}
}

// markFailed renames a listing file to its "-err" variant so it is treated as
// untrusted on the next run.
func markFailed(file string) {
	failFile := file + "-err"
	if bilib.FileExists(file) {
		_ = os.Remove(failFile)
		_ = os.Rename(file, failFile)
	}
}
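Taken together, the intended lock lifecycle is: create the lock at the start of a run, let the background ticker renew it, and remove it when the run finishes. A minimal sketch of that call order (the surrounding run function is assumed here and not shown in this diff):

	if err := b.setLockFile(); err != nil {
		return err // a valid, unexpired lock from another run blocks this one
	}
	defer b.removeLockFile() // stops renewal and deletes the .lck file
	// ... perform the sync; the lock is renewed every --max-lock minus one minute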
Some files were not shown because too many files have changed in this diff.