mirror of https://github.com/rclone/rclone.git
synced 2026-02-27 01:43:15 +00:00

Compare commits: 3 commits, master...dependabot

Commits:
- ed85edef50
- 3885800959
- 698373fd5c
.github/workflows/build.yml (vendored): 24 changed lines

@@ -29,12 +29,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.25']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '~1.26.0'
+            go: '>=1.25.0-rc.1'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -45,14 +45,14 @@ jobs:
 
           - job_name: linux_386
             os: ubuntu-latest
-            go: '~1.26.0'
+            go: '>=1.25.0-rc.1'
             goarch: 386
             gotags: cmount
             quicktest: true
 
           - job_name: mac_amd64
             os: macos-latest
-            go: '~1.26.0'
+            go: '>=1.25.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -61,14 +61,14 @@ jobs:
 
           - job_name: mac_arm64
             os: macos-latest
-            go: '~1.26.0'
+            go: '>=1.25.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
 
           - job_name: windows
             os: windows-latest
-            go: '~1.26.0'
+            go: '>=1.25.0-rc.1'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -78,14 +78,14 @@ jobs:
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '~1.26.0'
+            go: '>=1.25.0-rc.1'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: go1.25
+          - job_name: go1.24
             os: ubuntu-latest
-            go: '~1.25.7'
+            go: '1.24'
             quicktest: true
             racequicktest: true
 
@@ -224,7 +224,7 @@ jobs:
         id: setup-go
         uses: actions/setup-go@v6
         with:
-          go-version: '~1.26.0'
+          go-version: '>=1.24.0-rc.1'
           check-latest: true
           cache: false
 
@@ -283,7 +283,7 @@ jobs:
         run: govulncheck ./...
 
       - name: Check Markdown format
-        uses: DavidAnson/markdownlint-cli2-action@v20
+        uses: DavidAnson/markdownlint-cli2-action@v22
         with:
           globs: |
             CONTRIBUTING.md
@@ -315,7 +315,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v6
         with:
-          go-version: '~1.26.0'
+          go-version: '>=1.25.0-rc.1'
 
       - name: Set global environment variables
         run: |
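A note on the version specifiers above: `actions/setup-go` interprets `go-version` as a semver range, so `~1.26.0` tracks the newest 1.26.x patch release, while a spec like `>=1.25.0-rc.1` also admits release candidates; `check-latest: true` makes setup-go query for the newest matching toolchain rather than settling for a locally cached one.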
@@ -412,8 +412,8 @@ the source file in the `Help:` field:
 - The `backenddocs` make target runs the Python script `bin/make_backend_docs.py`,
   and you can also run this directly, optionally with the name of a backend
   as argument to only update the docs for a specific backend.
-- **Do not** commit the updated Markdown files. This operation is run as part of
-  the release process. Since any manual changes in the autogenerated sections
+- **Do not** commit the updated Markdown files. This operation is run as part
+  of the release process. Since any manual changes in the autogenerated sections
   of the Markdown files will then be lost, we have a pull request check that
   reports error for any changes within the autogenerated sections. Should you
   have done manual changes outside of the autogenerated sections they must be
@@ -580,7 +580,8 @@ remote or an fs.
 make sure we can encode any path name and `rclone info` to help determine the
 encodings needed
 - `rclone purge -v TestRemote:rclone-info`
-- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
+- `rclone test info --all --remote-encoding None -vv --write-json remote.json
+  TestRemote:rclone-info`
 - `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
 - open `remote.csv` in a spreadsheet and examine
 
@@ -632,22 +633,14 @@ Add your backend to the docs - you'll need to pick an icon for it from
 alphabetical order of full name of remote (e.g. `drive` is ordered as
 `Google Drive`) but with the local file system last.
 
-First add a data file about your backend in
-`docs/data/backends/remote.yaml` - this is used to build the overview
-tables and the tiering info.
-
-- Create it with: `bin/manage_backends.py create docs/data/backends/remote.yaml`
-- Edit it to fill in the blanks. Look at the [tiers docs](https://rclone.org/tiers/).
-- Run this command to fill in the features: `bin/manage_backends.py features docs/data/backends/remote.yaml`
-
-Next edit these files:
-
 - `README.md` - main GitHub page
 - `docs/content/remote.md` - main docs page (note the backend options are
   automatically added to this file with `make backenddocs`)
   - make sure this has the `autogenerated options` comments in (see your
     reference backend docs)
   - update them in your backend with `bin/make_backend_docs.py remote`
+- `docs/content/overview.md` - overview docs - add an entry into the Features
+  table and the Optional Features table.
 - `docs/content/docs.md` - list of remotes in config section
 - `docs/content/_index.md` - front page of rclone.org
 - `docs/layouts/chrome/navbar.html` - add it to the website navigation
@@ -2,27 +2,27 @@
 
 Current active maintainers of rclone are:
 
 | Name             | GitHub ID         | Specific Responsibilities              |
-| :--------------- | :---------------- | :-------------------------- |
+| :--------------- | :---------------- | :------------------------------------- |
 | Nick Craig-Wood  | @ncw              | overall project health                 |
 | Stefan Breunig   | @breunigs         |                                        |
 | Ishuah Kariuki   | @ishuah           |                                        |
 | Remus Bunduc     | @remusb           | cache backend                          |
 | Fabian Möller    | @B4dM4n           |                                        |
 | Alex Chen        | @Cnly             | onedrive backend                       |
 | Sandeep Ummadi   | @sandeepkru       | azureblob backend                      |
 | Sebastian Bünger | @buengese         | jottacloud, yandex & compress backends |
 | Ivan Andreev     | @ivandeex         | chunker & mailru backends              |
 | Max Sum          | @Max-Sum          | union backend                          |
 | Fred             | @creativeprojects | seafile backend                        |
 | Caleb Case       | @calebcase        | storj backend                          |
 | wiserain         | @wiserain         | pikpak backend                         |
 | albertony        | @albertony        |                                        |
 | Chun-Hung Tseng  | @henrybear327     | Proton Drive Backend                   |
 | Hideo Aoyama     | @boukendesho      | snap packaging                         |
 | nielash          | @nielash          | bisync                                 |
 | Dan McArdle      | @dmcardle         | gitannex                               |
 | Sam Harrison     | @childish-sambino | filescom                               |
 
 ## This is a work in progress draft
 
MANUAL.html (generated): 6166 changed lines. File diff suppressed because it is too large.
MANUAL.txt (generated): 2409 changed lines. File diff suppressed because it is too large.
Makefile: 6 changed lines

@@ -216,12 +216,6 @@ beta:
 	rclone -v copy build/ pub.rclone.org:/$(TAG)
 	@echo Beta release ready at https://pub.rclone.org/$(TAG)/
 
-privatebeta:
-	go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) -include '^(darwin|windows|linux)/(arm64|amd64)$$' $(TAG)
-	rclone -Pv copy build/ private-downloads:/beta/$(TAG)
-	@echo Private beta release ready at private-downloads:/beta/$(TAG)/
-	rclone link private-downloads:/beta/$(TAG)
-
 log_since_last_release:
 	git log $(LAST_TAG)..
 
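In the removed `privatebeta` recipe, `$$` is make's escape for a literal `$`, so the shell passes the regexp `'^(darwin|windows|linux)/(arm64|amd64)$'` to `bin/cross-compile.go` with its end-of-string anchor intact.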
README.md: 10 changed lines

@@ -28,26 +28,21 @@ directories to and from different cloud storage providers.
 - Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
 - Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
 - ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
-- Bizfly Cloud Simple Storage [:page_facing_up:](https://rclone.org/s3/#bizflycloud)
 - Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
 - Box [:page_facing_up:](https://rclone.org/box/)
 - Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
 - China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
-- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
 - Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
-- Cloudinary [:page_facing_up:](https://rclone.org/cloudinary/)
+- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
 - Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
 - DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
 - Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
 - Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
-- Drime [:page_facing_up:](https://rclone.org/s3/#drime)
 - Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
 - Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
 - Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
-- Fastly Object Storage [:page_facing_up:](https://rclone.org/s3/#fastly)
 - Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
 - FileLu [:page_facing_up:](https://rclone.org/filelu/)
-- Filen [:page_facing_up:](https://rclone.org/filen/)
 - Files.com [:page_facing_up:](https://rclone.org/filescom/)
 - FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
 - FTP [:page_facing_up:](https://rclone.org/ftp/)
@@ -64,7 +59,6 @@ directories to and from different cloud storage providers.
 - iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
 - ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
 - Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
-- Internxt [:page_facing_up:](https://rclone.org/internxt/)
 - Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 - IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
 - Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
@@ -118,6 +112,7 @@ directories to and from different cloud storage providers.
 - Shade [:page_facing_up:](https://rclone.org/shade/)
 - SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
 - Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
+- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
 - Storj [:page_facing_up:](https://rclone.org/storj/)
 - SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
 - Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
@@ -126,7 +121,6 @@ directories to and from different cloud storage providers.
 - Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
 - WebDAV [:page_facing_up:](https://rclone.org/webdav/)
 - Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
-- Zadara Object Storage [:page_facing_up:](https://rclone.org/s3/#zadara)
 - Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
 - Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
 - The local filesystem [:page_facing_up:](https://rclone.org/local/)
RELEASE.md: 53 changed lines

@@ -109,59 +109,6 @@ go run github.com/icholy/gomajor@latest list -major
 
 Expect API breakage when updating major versions.
 
-## Updating Go
-
-When a new Go stable is released update to it. We support the current
-stable Go and the previous release which is in line with the rest of
-the Go ecosystem.
-
-These files will need editing:
-
-- `.github/workflows/build.yml` - change current and previous Go versions
-- `docs/content/install.md` - change minimum Go version required
-- `fs/versioncheck.go` - update minimum Go version required
-- `go.mod` - update minimum Go version required
-
-Check it builds
-
-- `make GOTAGS=cmount`
-- `make compiletest`
-
-Assuming `go1.XX` is current and `go1.YY` is previous version:
-
-Use `git grep go1.XX` and `git grep go1.YY` to look for opportunities
-to remove build tags we no longer need.
-
-Commit with message like this:
-
-```text
-build: update to go1.XX and make go1.YY the minimum required version
-```
-
-Send to CI and if it passes, merge.
-
-### gofix
-
-Updating the minimum required version of Go is a good opportunity to
-run the `go fix` command to modernize Go usage.
-
-This needs to be run for all architectures.
-
-```console
-GOOS=linux go fix -tags cmount ./...
-GOOS=freebsd go fix -tags cmount ./...
-GOOS=windows go fix -tags cmount ./...
-GOOS=darwin go fix -tags cmount ./...
-```
-
-Examine the diff carefully.
-
-Commit with message
-
-```text
-build: modernize Go code with go fix for go1.YY
-```
-
 ## Tidy beta
 
 At some point after the release run
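The removed section points at `fs/versioncheck.go` as the place the minimum Go version is enforced. A minimal sketch of the usual build-constraint trick, reconstructed from the description rather than copied from the repo (the version number and function name are illustrative):

```go
//go:build !go1.24

// This file is compiled only by toolchains older than the assumed
// minimum (go1.24 here). Calling an undefined function turns that
// into an obvious compile-time error naming the required version.
package fs

func init() {
	Go_version_1_24_required_for_compilation()
}
```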
@@ -16,13 +16,11 @@ import (
 	_ "github.com/rclone/rclone/backend/compress"
 	_ "github.com/rclone/rclone/backend/crypt"
 	_ "github.com/rclone/rclone/backend/doi"
-	_ "github.com/rclone/rclone/backend/drime"
 	_ "github.com/rclone/rclone/backend/drive"
 	_ "github.com/rclone/rclone/backend/dropbox"
 	_ "github.com/rclone/rclone/backend/fichier"
 	_ "github.com/rclone/rclone/backend/filefabric"
 	_ "github.com/rclone/rclone/backend/filelu"
-	_ "github.com/rclone/rclone/backend/filen"
 	_ "github.com/rclone/rclone/backend/filescom"
 	_ "github.com/rclone/rclone/backend/ftp"
 	_ "github.com/rclone/rclone/backend/gofile"
@@ -35,7 +33,6 @@ import (
 	_ "github.com/rclone/rclone/backend/iclouddrive"
 	_ "github.com/rclone/rclone/backend/imagekit"
 	_ "github.com/rclone/rclone/backend/internetarchive"
-	_ "github.com/rclone/rclone/backend/internxt"
 	_ "github.com/rclone/rclone/backend/jottacloud"
 	_ "github.com/rclone/rclone/backend/koofr"
 	_ "github.com/rclone/rclone/backend/linkbox"
@@ -67,6 +64,7 @@ import (
 	_ "github.com/rclone/rclone/backend/swift"
 	_ "github.com/rclone/rclone/backend/ulozto"
 	_ "github.com/rclone/rclone/backend/union"
+	_ "github.com/rclone/rclone/backend/uptobox"
 	_ "github.com/rclone/rclone/backend/webdav"
 	_ "github.com/rclone/rclone/backend/yandex"
 	_ "github.com/rclone/rclone/backend/zoho"
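For context, these are blank imports: each backend package registers itself with rclone's registry from its `init` function, so importing it for side effects is all that is required. A minimal sketch of the pattern with a hypothetical `mybackend` (the name and description are illustrative; `fs.Register` and `fs.RegInfo` are the real rclone API):

```go
// Package mybackend sketches the self-registration pattern behind the
// blank imports above. Names here are illustrative, not from the diff.
package mybackend

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mybackend",
		Description: "Example storage backend",
		NewFs:       NewFs,
	})
}

// NewFs builds the backend from its parsed configuration.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// A real backend reads its options from m and returns an fs.Fs.
	return nil, fs.ErrorNotImplemented
}
```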
@@ -1,584 +0,0 @@
-// Package auth supplies the authentication and client creation for the azure SDK
-package auth
-
-import (
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"net/http"
-	"net/url"
-	"os"
-
-	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
-	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
-	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/lib/env"
-)
-
-const (
-	// Default storage account, key and blob endpoint for emulator support,
-	// though it is a base64 key checked in here, it is publicly available secret.
-	emulatorAccount      = "devstoreaccount1"
-	emulatorAccountKey   = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
-	emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
-)
-
-// ConfigOptions is the common authentication options for azure
-var ConfigOptions = []fs.Option{{
-	Name: "account",
-	Help: `Azure Storage Account Name.
-
-Set this to the Azure Storage Account Name in use.
-
-Leave blank to use SAS URL or Emulator, otherwise it needs to be set.
-
-If this is blank and if env_auth is set it will be read from the
-environment variable ` + "`AZURE_STORAGE_ACCOUNT_NAME`" + ` if possible.
-`,
-	Sensitive: true,
-}, {
-	Name: "env_auth",
-	Help: `Read credentials from runtime (environment variables, CLI or MSI).
-
-See the [authentication docs](/azureblob#authentication) for full info.`,
-	Default: false,
-}, {
-	Name: "key",
-	Help: `Storage Account Shared Key.
-
-Leave blank to use SAS URL or Emulator.`,
-	Sensitive: true,
-}, {
-	Name: "sas_url",
-	Help: `SAS URL for container level access only.
-
-Leave blank if using account/key or Emulator.`,
-	Sensitive: true,
-}, {
-	Name: "connection_string",
-	Help: `Storage Connection String.
-
-Connection string for the storage. Leave blank if using other auth methods.
-`,
-	Sensitive: true,
-}, {
-	Name: "tenant",
-	Help: `ID of the service principal's tenant. Also called its directory ID.
-
-Set this if using
-- Service principal with client secret
-- Service principal with certificate
-- User with username and password
-`,
-	Sensitive: true,
-}, {
-	Name: "client_id",
-	Help: `The ID of the client in use.
-
-Set this if using
-- Service principal with client secret
-- Service principal with certificate
-- User with username and password
-`,
-	Sensitive: true,
-}, {
-	Name: "client_secret",
-	Help: `One of the service principal's client secrets
-
-Set this if using
-- Service principal with client secret
-`,
-	Sensitive: true,
-}, {
-	Name: "client_certificate_path",
-	Help: `Path to a PEM or PKCS12 certificate file including the private key.
-
-Set this if using
-- Service principal with certificate
-`,
-}, {
-	Name: "client_certificate_password",
-	Help: `Password for the certificate file (optional).
-
-Optionally set this if using
-- Service principal with certificate
-
-And the certificate has a password.
-`,
-	IsPassword: true,
-}, {
-	Name: "client_send_certificate_chain",
-	Help: `Send the certificate chain when using certificate auth.
-
-Specifies whether an authentication request will include an x5c header
-to support subject name / issuer based authentication. When set to
-true, authentication requests include the x5c header.
-
-Optionally set this if using
-- Service principal with certificate
-`,
-	Default:  false,
-	Advanced: true,
-}, {
-	Name: "username",
-	Help: `User name (usually an email address)
-
-Set this if using
-- User with username and password
-`,
-	Advanced:  true,
-	Sensitive: true,
-}, {
-	Name: "password",
-	Help: `The user's password
-
-Set this if using
-- User with username and password
-`,
-	IsPassword: true,
-	Advanced:   true,
-}, {
-	Name: "service_principal_file",
-	Help: `Path to file containing credentials for use with a service principal.
-
-Leave blank normally. Needed only if you want to use a service principal instead of interactive login.
-
-    $ az ad sp create-for-rbac --name "<name>" \
-      --role "Storage Blob Data Owner" \
-      --scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
-      > azure-principal.json
-
-See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.
-
-It may be more convenient to put the credentials directly into the
-rclone config file under the ` + "`client_id`, `tenant` and `client_secret`" + `
-keys instead of setting ` + "`service_principal_file`" + `.
-`,
-	Advanced: true,
-}, {
-	Name: "disable_instance_discovery",
-	Help: `Skip requesting Microsoft Entra instance metadata
-
-This should be set true only by applications authenticating in
-disconnected clouds, or private clouds such as Azure Stack.
-
-It determines whether rclone requests Microsoft Entra instance
-metadata from ` + "`https://login.microsoft.com/`" + ` before
-authenticating.
-
-Setting this to true will skip this request, making you responsible
-for ensuring the configured authority is valid and trustworthy.
-`,
-	Default:  false,
-	Advanced: true,
-}, {
-	Name: "use_msi",
-	Help: `Use a managed service identity to authenticate (only works in Azure).
-
-When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
-to authenticate to Azure Storage instead of a SAS token or account key.
-
-If the VM(SS) on which this program is running has a system-assigned identity, it will
-be used by default. If the resource has no system-assigned but exactly one user-assigned identity,
-the user-assigned identity will be used by default. If the resource has multiple user-assigned
-identities, the identity to use must be explicitly specified using exactly one of the msi_object_id,
-msi_client_id, or msi_mi_res_id parameters.`,
-	Default:  false,
-	Advanced: true,
-}, {
-	Name:      "msi_object_id",
-	Help:      "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
-	Advanced:  true,
-	Sensitive: true,
-}, {
-	Name:      "msi_client_id",
-	Help:      "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
-	Advanced:  true,
-	Sensitive: true,
-}, {
-	Name:      "msi_mi_res_id",
-	Help:      "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
-	Advanced:  true,
-	Sensitive: true,
-}, {
-	Name:     "use_emulator",
-	Help:     "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
-	Default:  false,
-	Advanced: true,
-}, {
-	Name: "use_az",
-	Help: `Use Azure CLI tool az for authentication
-
-Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
-as the sole means of authentication.
-
-Setting this can be useful if you wish to use the az CLI on a host with
-a System Managed Identity that you do not want to use.
-
-Don't set env_auth at the same time.
-`,
-	Default:  false,
-	Advanced: true,
-}, {
-	Name:     "endpoint",
-	Help:     "Endpoint for the service.\n\nLeave blank normally.",
-	Advanced: true,
-}}
-
-// Options defines the common auth configuration for azure backends
-type Options struct {
-	Account                    string `config:"account"`
-	EnvAuth                    bool   `config:"env_auth"`
-	Key                        string `config:"key"`
-	SASURL                     string `config:"sas_url"`
-	ConnectionString           string `config:"connection_string"`
-	Tenant                     string `config:"tenant"`
-	ClientID                   string `config:"client_id"`
-	ClientSecret               string `config:"client_secret"`
-	ClientCertificatePath      string `config:"client_certificate_path"`
-	ClientCertificatePassword  string `config:"client_certificate_password"`
-	ClientSendCertificateChain bool   `config:"client_send_certificate_chain"`
-	Username                   string `config:"username"`
-	Password                   string `config:"password"`
-	ServicePrincipalFile       string `config:"service_principal_file"`
-	DisableInstanceDiscovery   bool   `config:"disable_instance_discovery"`
-	UseMSI                     bool   `config:"use_msi"`
-	MSIObjectID                string `config:"msi_object_id"`
-	MSIClientID                string `config:"msi_client_id"`
-	MSIResourceID              string `config:"msi_mi_res_id"`
-	UseEmulator                bool   `config:"use_emulator"`
-	UseAZ                      bool   `config:"use_az"`
-	Endpoint                   string `config:"endpoint"`
-}
-
-type servicePrincipalCredentials struct {
-	AppID    string `json:"appId"`
-	Password string `json:"password"`
-	Tenant   string `json:"tenant"`
-}
-
-// parseServicePrincipalCredentials unmarshals a service principal credentials JSON file as generated by az cli.
-func parseServicePrincipalCredentials(ctx context.Context, credentialsData []byte) (*servicePrincipalCredentials, error) {
-	var spCredentials servicePrincipalCredentials
-	if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
-		return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err)
-	}
-	// TODO: support certificate credentials
-	// Validate all fields present
-	if spCredentials.AppID == "" || spCredentials.Password == "" || spCredentials.Tenant == "" {
-		return nil, fmt.Errorf("missing fields in credentials file")
-	}
-	return &spCredentials, nil
-}
-
-// Wrap the http.Transport to satisfy the Transporter interface
-type transporter struct {
-	http.RoundTripper
-}
-
-// Make a new transporter
-func newTransporter(ctx context.Context) transporter {
-	return transporter{
-		RoundTripper: fshttp.NewTransport(ctx),
-	}
-}
-
-// Do sends the HTTP request and returns the HTTP response or error.
-func (tr transporter) Do(req *http.Request) (*http.Response, error) {
-	return tr.RoundTripper.RoundTrip(req)
-}
-
-// NewClientOpts should be passed to configure NewClient
-type NewClientOpts[Client, ClientOptions, SharedKeyCredential any] struct {
-	DefaultBaseURL                   string // Base URL, eg blob.core.windows.net
-	Blob                             bool   // set if this is blob storage
-	RootContainer                    string // Container that rclone is looking at
-	NewClient                        func(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error)
-	NewClientFromConnectionString    func(connectionString string, options *ClientOptions) (*Client, error)
-	NewClientWithNoCredential        func(serviceURL string, options *ClientOptions) (*Client, error)
-	NewClientWithSharedKeyCredential func(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error)
-	NewSharedKeyCredential           func(accountName, accountKey string) (*SharedKeyCredential, error)
-	SetClientOptions                 func(options *ClientOptions, policyClientOptions policy.ClientOptions)
-}
-
-// NewClientResult is returned from NewClient
-type NewClientResult[Client any] struct {
-	Client             *Client                // Client to access the Service
-	Cred               azcore.TokenCredential // how to generate tokens (may be nil)
-	UsingSharedKeyCred bool                   // set if using shared key credentials
-	Anonymous          bool                   // true if anonymous authentication was used
-	Container          string                 // Container that SAS URL points to
-}
-
-// NewClient creates a service client from the rclone options
-func NewClient[Client, ClientOptions, SharedKeyCredential any](ctx context.Context, conf NewClientOpts[Client, ClientOptions, SharedKeyCredential], opt *Options) (r NewClientResult[Client], err error) {
-	var sharedKeyCred *SharedKeyCredential
-
-	// Client options specifying our own transport
-	policyClientOptions := policy.ClientOptions{
-		Transport: newTransporter(ctx),
-	}
-	// Can't do this with generics (yet)
-	// clientOpt := service.ClientOptions{
-	// 	ClientOptions: policyClientOptions,
-	// }
-	// So call back to user
-	var clientOpt ClientOptions
-	conf.SetClientOptions(&clientOpt, policyClientOptions)
-
-	// Here we auth by setting one of cred, sharedKeyCred, client or anonymous
-	switch {
-	case opt.EnvAuth:
-		// Read account from environment if needed
-		if opt.Account == "" {
-			opt.Account, _ = os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
-		}
-		// Read credentials from the environment
-		options := azidentity.DefaultAzureCredentialOptions{
-			ClientOptions:            policyClientOptions,
-			DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
-		}
-		r.Cred, err = azidentity.NewDefaultAzureCredential(&options)
-		if err != nil {
-			return r, fmt.Errorf("create azure environment credential failed: %w", err)
-		}
-	case opt.UseEmulator:
-		if opt.Account == "" {
-			opt.Account = emulatorAccount
-		}
-		if opt.Key == "" {
-			opt.Key = emulatorAccountKey
-		}
-		if opt.Endpoint == "" {
-			opt.Endpoint = emulatorBlobEndpoint
-		}
-		if conf.NewSharedKeyCredential == nil {
-			return r, errors.New("emulator use not supported")
-		}
-		sharedKeyCred, err = conf.NewSharedKeyCredential(opt.Account, opt.Key)
-		if err != nil {
-			return r, fmt.Errorf("create new shared key credential for emulator failed: %w", err)
-		}
-	case opt.Account != "" && opt.Key != "":
-		if conf.NewSharedKeyCredential == nil {
-			return r, errors.New("shared key credentials not supported")
-		}
-		sharedKeyCred, err = conf.NewSharedKeyCredential(opt.Account, opt.Key)
-		if err != nil {
-			return r, fmt.Errorf("create new shared key credential failed: %w", err)
-		}
-	case opt.SASURL != "":
-		parts, err := sas.ParseURL(opt.SASURL)
-		if err != nil {
-			return r, fmt.Errorf("failed to parse SAS URL: %w", err)
-		}
-		endpoint := opt.SASURL
-		r.Container = parts.ContainerName
-		// Check if we have container level SAS or account level SAS
-		if conf.Blob && r.Container != "" {
-			// Container level SAS
-			if conf.RootContainer != "" && r.Container != conf.RootContainer {
-				return r, fmt.Errorf("container name in SAS URL (%q) and container provided in command (%q) do not match", r.Container, conf.RootContainer)
-			}
-			// Rewrite the endpoint string to be without the container
-			parts.ContainerName = ""
-			endpoint = parts.String()
-		}
-		r.Client, err = conf.NewClientWithNoCredential(endpoint, &clientOpt)
-		if err != nil {
-			return r, fmt.Errorf("unable to create SAS URL client: %w", err)
-		}
-	case opt.ConnectionString != "":
-		r.Client, err = conf.NewClientFromConnectionString(opt.ConnectionString, &clientOpt)
-		if err != nil {
-			return r, fmt.Errorf("unable to create connection string client: %w", err)
-		}
-	case opt.ClientID != "" && opt.Tenant != "" && opt.ClientSecret != "":
-		// Service principal with client secret
-		options := azidentity.ClientSecretCredentialOptions{
-			ClientOptions: policyClientOptions,
-		}
-		r.Cred, err = azidentity.NewClientSecretCredential(opt.Tenant, opt.ClientID, opt.ClientSecret, &options)
-		if err != nil {
-			return r, fmt.Errorf("error creating a client secret credential: %w", err)
-		}
-	case opt.ClientID != "" && opt.Tenant != "" && opt.ClientCertificatePath != "":
-		// Service principal with certificate
-		//
-		// Read the certificate
-		data, err := os.ReadFile(env.ShellExpand(opt.ClientCertificatePath))
-		if err != nil {
-			return r, fmt.Errorf("error reading client certificate file: %w", err)
-		}
-		// NewClientCertificateCredential requires at least one *x509.Certificate, and a
-		// crypto.PrivateKey.
-		//
-		// ParseCertificates returns these given certificate data in PEM or PKCS12 format.
-		// It handles common scenarios but has limitations, for example it doesn't load PEM
-		// encrypted private keys.
-		var password []byte
-		if opt.ClientCertificatePassword != "" {
-			pw, err := obscure.Reveal(opt.Password)
-			if err != nil {
-				return r, fmt.Errorf("certificate password decode failed - did you obscure it?: %w", err)
-			}
-			password = []byte(pw)
-		}
-		certs, key, err := azidentity.ParseCertificates(data, password)
-		if err != nil {
-			return r, fmt.Errorf("failed to parse client certificate file: %w", err)
-		}
-		options := azidentity.ClientCertificateCredentialOptions{
-			ClientOptions:        policyClientOptions,
-			SendCertificateChain: opt.ClientSendCertificateChain,
-		}
-		r.Cred, err = azidentity.NewClientCertificateCredential(
-			opt.Tenant, opt.ClientID, certs, key, &options,
-		)
-		if err != nil {
-			return r, fmt.Errorf("create azure service principal with client certificate credential failed: %w", err)
-		}
-	case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
-		// User with username and password
-		//nolint:staticcheck // this is deprecated due to Azure policy
-		options := azidentity.UsernamePasswordCredentialOptions{
-			ClientOptions: policyClientOptions,
-		}
-		password, err := obscure.Reveal(opt.Password)
-		if err != nil {
-			return r, fmt.Errorf("user password decode failed - did you obscure it?: %w", err)
-		}
-		r.Cred, err = azidentity.NewUsernamePasswordCredential(
-			opt.Tenant, opt.ClientID, opt.Username, password, &options,
-		)
-		if err != nil {
-			return r, fmt.Errorf("authenticate user with password failed: %w", err)
-		}
-	case opt.ServicePrincipalFile != "":
-		// Loading service principal credentials from file.
-		loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
-		if err != nil {
-			return r, fmt.Errorf("error opening service principal credentials file: %w", err)
-		}
-		parsedCreds, err := parseServicePrincipalCredentials(ctx, loadedCreds)
-		if err != nil {
-			return r, fmt.Errorf("error parsing service principal credentials file: %w", err)
-		}
-		options := azidentity.ClientSecretCredentialOptions{
-			ClientOptions: policyClientOptions,
-		}
-		r.Cred, err = azidentity.NewClientSecretCredential(parsedCreds.Tenant, parsedCreds.AppID, parsedCreds.Password, &options)
-		if err != nil {
-			return r, fmt.Errorf("error creating a client secret credential: %w", err)
-		}
-	case opt.UseMSI:
-		// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
-		// Validate and ensure exactly one is set. (To do: better validation.)
-		var b2i = map[bool]int{false: 0, true: 1}
-		set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""]
-		if set > 1 {
-			return r, errors.New("more than one user-assigned identity ID is set")
-		}
-		var options azidentity.ManagedIdentityCredentialOptions
-		switch {
-		case opt.MSIClientID != "":
-			options.ID = azidentity.ClientID(opt.MSIClientID)
-		case opt.MSIObjectID != "":
-			// FIXME this doesn't appear to be in the new SDK?
-			return r, fmt.Errorf("MSI object ID is currently unsupported")
-		case opt.MSIResourceID != "":
-			options.ID = azidentity.ResourceID(opt.MSIResourceID)
-		}
-		r.Cred, err = azidentity.NewManagedIdentityCredential(&options)
-		if err != nil {
-			return r, fmt.Errorf("failed to acquire MSI token: %w", err)
-		}
-	case opt.ClientID != "" && opt.Tenant != "" && opt.MSIClientID != "":
-		// Workload Identity based authentication
-		var options azidentity.ManagedIdentityCredentialOptions
-		options.ID = azidentity.ClientID(opt.MSIClientID)
-
-		msiCred, err := azidentity.NewManagedIdentityCredential(&options)
-		if err != nil {
-			return r, fmt.Errorf("failed to acquire MSI token: %w", err)
-		}
-
-		getClientAssertions := func(context.Context) (string, error) {
-			token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
-				Scopes: []string{"api://AzureADTokenExchange"},
-			})
-
-			if err != nil {
-				return "", fmt.Errorf("failed to acquire MSI token: %w", err)
-			}
-
-			return token.Token, nil
-		}
-
-		assertOpts := &azidentity.ClientAssertionCredentialOptions{}
-		r.Cred, err = azidentity.NewClientAssertionCredential(
-			opt.Tenant,
-			opt.ClientID,
-			getClientAssertions,
-			assertOpts)
-
-		if err != nil {
-			return r, fmt.Errorf("failed to acquire client assertion token: %w", err)
-		}
-	case opt.UseAZ:
-		var options = azidentity.AzureCLICredentialOptions{}
-		r.Cred, err = azidentity.NewAzureCLICredential(&options)
-		if err != nil {
-			return r, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
-		}
-	case opt.Account != "":
-		// Anonymous access
-		r.Anonymous = true
-	default:
-		return r, errors.New("no authentication method configured")
-	}
-
-	// Make the client if not already created
-	if r.Client == nil {
-		// Work out what the endpoint is if it is still unset
-		if opt.Endpoint == "" {
-			if opt.Account == "" {
-				return r, fmt.Errorf("account must be set: can't make service URL")
-			}
-			u, err := url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, conf.DefaultBaseURL))
-			if err != nil {
-				return r, fmt.Errorf("failed to make azure storage URL from account: %w", err)
-			}
-			opt.Endpoint = u.String()
-		}
-		if sharedKeyCred != nil {
-			// Shared key cred
-			r.Client, err = conf.NewClientWithSharedKeyCredential(opt.Endpoint, sharedKeyCred, &clientOpt)
-			if err != nil {
-				return r, fmt.Errorf("create client with shared key failed: %w", err)
-			}
-			r.UsingSharedKeyCred = true
-		} else if r.Cred != nil {
-			// Azidentity cred
-			r.Client, err = conf.NewClient(opt.Endpoint, r.Cred, &clientOpt)
-			if err != nil {
-				return r, fmt.Errorf("create client failed: %w", err)
-			}
-		} else if r.Anonymous {
-			// Anonymous public access
-			r.Client, err = conf.NewClientWithNoCredential(opt.Endpoint, &clientOpt)
-			if err != nil {
-				return r, fmt.Errorf("create public client failed: %w", err)
-			}
-		}
-	}
-	if r.Client == nil {
-		return r, fmt.Errorf("internal error: auth failed to make credentials or client")
-	}
-	return r, nil
-}
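The centrepiece of this deleted file is the generic `NewClient`, which keeps the auth package free of a dependency on any one storage service's client type by taking the SDK constructors as callbacks. A sketch of how a blob backend might wire it up, assuming the azblob `service` package's constructor signatures (the `service.*` names and surrounding variables are assumptions, not taken from this diff):

```go
// Hypothetical caller-side wiring for auth.NewClient; the service.*
// constructors are assumed from the azblob SDK.
conf := auth.NewClientOpts[service.Client, service.ClientOptions, service.SharedKeyCredential]{
	DefaultBaseURL:                   "blob.core.windows.net",
	Blob:                             true,
	RootContainer:                    rootContainer,
	NewClient:                        service.NewClient,
	NewClientFromConnectionString:    service.NewClientFromConnectionString,
	NewClientWithNoCredential:        service.NewClientWithNoCredential,
	NewClientWithSharedKeyCredential: service.NewClientWithSharedKeyCredential,
	NewSharedKeyCredential:           service.NewSharedKeyCredential,
	SetClientOptions: func(o *service.ClientOptions, pco policy.ClientOptions) {
		o.ClientOptions = pco // thread rclone's transport into the SDK pipeline
	},
}
result, err := auth.NewClient(ctx, conf, opt) // opt is the parsed *auth.Options
if err != nil {
	return nil, err
}
svc := result.Client // *service.Client, authenticated by whichever branch matched
```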
@@ -11,11 +11,13 @@ import (
|
|||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"maps"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"slices"
|
"slices"
|
||||||
"sort"
|
"sort"
|
||||||
@@ -26,24 +28,27 @@ import (
|
|||||||
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
|
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
|
||||||
|
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
|
||||||
"github.com/rclone/rclone/backend/azureblob/auth"
|
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/chunksize"
|
"github.com/rclone/rclone/fs/chunksize"
|
||||||
"github.com/rclone/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/list"
|
"github.com/rclone/rclone/fs/list"
|
||||||
"github.com/rclone/rclone/lib/atexit"
|
"github.com/rclone/rclone/lib/atexit"
|
||||||
"github.com/rclone/rclone/lib/bucket"
|
"github.com/rclone/rclone/lib/bucket"
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
|
"github.com/rclone/rclone/lib/env"
|
||||||
"github.com/rclone/rclone/lib/multipart"
|
"github.com/rclone/rclone/lib/multipart"
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
"github.com/rclone/rclone/lib/pool"
|
"github.com/rclone/rclone/lib/pool"
|
||||||
@@ -63,7 +68,12 @@ const (
|
|||||||
storageDefaultBaseURL = "blob.core.windows.net"
|
storageDefaultBaseURL = "blob.core.windows.net"
|
||||||
defaultChunkSize = 4 * fs.Mebi
|
defaultChunkSize = 4 * fs.Mebi
|
||||||
defaultAccessTier = blob.AccessTier("") // FIXME AccessTierNone
|
defaultAccessTier = blob.AccessTier("") // FIXME AccessTierNone
|
||||||
sasCopyValidity = time.Hour // how long SAS should last when doing server side copy
|
// Default storage account, key and blob endpoint for emulator support,
|
||||||
|
// though it is a base64 key checked in here, it is publicly available secret.
|
||||||
|
emulatorAccount = "devstoreaccount1"
|
||||||
|
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
|
||||||
|
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
|
||||||
|
sasCopyValidity = time.Hour // how long SAS should last when doing server side copy
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -126,7 +136,199 @@ func init() {
|
|||||||
System: systemMetadataInfo,
|
System: systemMetadataInfo,
|
||||||
Help: `User metadata is stored as x-ms-meta- keys. Azure metadata keys are case insensitive and are always returned in lower case.`,
|
Help: `User metadata is stored as x-ms-meta- keys. Azure metadata keys are case insensitive and are always returned in lower case.`,
|
||||||
},
|
},
|
||||||
Options: slices.Concat(auth.ConfigOptions, []fs.Option{{
|
Options: []fs.Option{{
|
||||||
|
Name: "account",
|
||||||
|
Help: `Azure Storage Account Name.
|
||||||
|
|
||||||
|
Set this to the Azure Storage Account Name in use.
|
||||||
|
|
||||||
|
Leave blank to use SAS URL or Emulator, otherwise it needs to be set.
|
||||||
|
|
||||||
|
If this is blank and if env_auth is set it will be read from the
|
||||||
|
environment variable ` + "`AZURE_STORAGE_ACCOUNT_NAME`" + ` if possible.
|
||||||
|
`,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "env_auth",
|
||||||
|
Help: `Read credentials from runtime (environment variables, CLI or MSI).
|
||||||
|
|
||||||
|
See the [authentication docs](/azureblob#authentication) for full info.`,
|
||||||
|
Default: false,
|
||||||
|
}, {
|
||||||
|
Name: "key",
|
||||||
|
Help: `Storage Account Shared Key.
|
||||||
|
|
||||||
|
Leave blank to use SAS URL or Emulator.`,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "sas_url",
|
||||||
|
Help: `SAS URL for container level access only.
|
||||||
|
|
||||||
|
Leave blank if using account/key or Emulator.`,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "tenant",
|
||||||
|
Help: `ID of the service principal's tenant. Also called its directory ID.
|
||||||
|
|
||||||
|
Set this if using
|
||||||
|
- Service principal with client secret
|
||||||
|
- Service principal with certificate
|
||||||
|
- User with username and password
|
||||||
|
`,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "client_id",
|
||||||
|
Help: `The ID of the client in use.
|
||||||
|
|
||||||
|
Set this if using
|
||||||
|
- Service principal with client secret
|
||||||
|
- Service principal with certificate
|
||||||
|
- User with username and password
|
||||||
|
`,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "client_secret",
|
||||||
|
Help: `One of the service principal's client secrets
|
||||||
|
|
||||||
|
Set this if using
|
||||||
|
- Service principal with client secret
|
||||||
|
`,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "client_certificate_path",
|
||||||
|
Help: `Path to a PEM or PKCS12 certificate file including the private key.
|
||||||
|
|
||||||
|
Set this if using
|
||||||
|
- Service principal with certificate
|
||||||
|
`,
|
||||||
|
}, {
|
||||||
|
Name: "client_certificate_password",
|
||||||
|
Help: `Password for the certificate file (optional).
|
||||||
|
|
||||||
|
Optionally set this if using
|
||||||
|
- Service principal with certificate
|
||||||
|
|
||||||
|
And the certificate has a password.
|
||||||
|
`,
|
||||||
|
IsPassword: true,
|
||||||
|
}, {
|
||||||
|
Name: "client_send_certificate_chain",
|
||||||
|
Help: `Send the certificate chain when using certificate auth.
|
||||||
|
|
||||||
|
Specifies whether an authentication request will include an x5c header
|
||||||
|
to support subject name / issuer based authentication. When set to
|
||||||
|
true, authentication requests include the x5c header.
|
||||||
|
|
||||||
|
Optionally set this if using
|
||||||
|
- Service principal with certificate
|
||||||
|
`,
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "username",
|
||||||
|
Help: `User name (usually an email address).
|
||||||
|
|
||||||
|
Set this if using
|
||||||
|
- User with username and password
|
||||||
|
`,
|
||||||
|
Advanced: true,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "password",
|
||||||
|
Help: `The user's password.
|
||||||
|
|
||||||
|
Set this if using
|
||||||
|
- User with username and password
|
||||||
|
`,
|
||||||
|
IsPassword: true,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "service_principal_file",
|
||||||
|
Help: `Path to file containing credentials for use with a service principal.
|
||||||
|
|
||||||
|
Leave blank normally. Needed only if you want to use a service principal instead of interactive login.
|
||||||
|
|
||||||
|
$ az ad sp create-for-rbac --name "<name>" \
|
||||||
|
--role "Storage Blob Data Owner" \
|
||||||
|
--scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
|
||||||
|
> azure-principal.json
|
||||||
|
|
||||||
|
See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.
|
||||||
|
|
||||||
|
It may be more convenient to put the credentials directly into the
|
||||||
|
rclone config file under the ` + "`client_id`, `tenant` and `client_secret`" + `
|
||||||
|
keys instead of setting ` + "`service_principal_file`" + `.
|
||||||
|
`,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "disable_instance_discovery",
|
||||||
|
Help: `Skip requesting Microsoft Entra instance metadata.
|
||||||
|
|
||||||
|
This should be set true only by applications authenticating in
|
||||||
|
disconnected clouds, or private clouds such as Azure Stack.
|
||||||
|
|
||||||
|
It determines whether rclone requests Microsoft Entra instance
|
||||||
|
metadata from ` + "`https://login.microsoft.com/`" + ` before
|
||||||
|
authenticating.
|
||||||
|
|
||||||
|
Setting this to true will skip this request, making you responsible
|
||||||
|
for ensuring the configured authority is valid and trustworthy.
|
||||||
|
`,
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "use_msi",
|
||||||
|
Help: `Use a managed service identity to authenticate (only works in Azure).
|
||||||
|
|
||||||
|
When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
|
||||||
|
to authenticate to Azure Storage instead of a SAS token or account key.
|
||||||
|
|
||||||
|
If the VM(SS) on which this program is running has a system-assigned identity, it will
|
||||||
|
be used by default. If the resource has no system-assigned but exactly one user-assigned identity,
|
||||||
|
the user-assigned identity will be used by default. If the resource has multiple user-assigned
|
||||||
|
identities, the identity to use must be explicitly specified using exactly one of the msi_object_id,
|
||||||
|
msi_client_id, or msi_mi_res_id parameters.`,
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "msi_object_id",
|
||||||
|
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
|
||||||
|
Advanced: true,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "msi_client_id",
|
||||||
|
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
|
||||||
|
Advanced: true,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "msi_mi_res_id",
|
||||||
|
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
|
||||||
|
Advanced: true,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "use_emulator",
|
||||||
|
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "use_az",
|
||||||
|
Help: `Use the Azure CLI tool az for authentication.
|
||||||
|
|
||||||
|
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
|
||||||
|
as the sole means of authentication.
|
||||||
|
|
||||||
|
Setting this can be useful if you wish to use the az CLI on a host with
|
||||||
|
a System Managed Identity that you do not want to use.
|
||||||
|
|
||||||
|
Don't set env_auth at the same time.
|
||||||
|
`,
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "endpoint",
|
||||||
|
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
Name: "upload_cutoff",
|
Name: "upload_cutoff",
|
||||||
Help: "Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).",
|
Help: "Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
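Several of the options above (client_certificate_password, password) are declared with IsPassword: true, meaning rclone stores them obscured and the backend must call obscure.Reveal before use, as the NewFs code later in this diff does. A quick sketch of the round trip using rclone's own obscure package:

// Obscure is what `rclone obscure` (and the config system) applies on write;
// Reveal recovers the plaintext at runtime.
enc, err := obscure.Obscure("s3cret")
if err != nil {
	return err
}
plain, err := obscure.Reveal(enc) // plain == "s3cret"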
@@ -339,50 +541,70 @@ rclone does if you know the container exists already.
|
|||||||
Default: "",
|
Default: "",
|
||||||
Exclusive: true,
|
Exclusive: true,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}}),
|
}},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
// Options defines the configuration for this backend
|
||||||
type Options struct {
|
type Options struct {
|
||||||
auth.Options
|
Account string `config:"account"`
|
||||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
EnvAuth bool `config:"env_auth"`
|
||||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
Key string `config:"key"`
|
||||||
CopyConcurrency int `config:"copy_concurrency"`
|
SASURL string `config:"sas_url"`
|
||||||
UseCopyBlob bool `config:"use_copy_blob"`
|
Tenant string `config:"tenant"`
|
||||||
UploadConcurrency int `config:"upload_concurrency"`
|
ClientID string `config:"client_id"`
|
||||||
ListChunkSize uint `config:"list_chunk"`
|
ClientSecret string `config:"client_secret"`
|
||||||
AccessTier string `config:"access_tier"`
|
ClientCertificatePath string `config:"client_certificate_path"`
|
||||||
ArchiveTierDelete bool `config:"archive_tier_delete"`
|
ClientCertificatePassword string `config:"client_certificate_password"`
|
||||||
DisableCheckSum bool `config:"disable_checksum"`
|
ClientSendCertificateChain bool `config:"client_send_certificate_chain"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Username string `config:"username"`
|
||||||
PublicAccess string `config:"public_access"`
|
Password string `config:"password"`
|
||||||
DirectoryMarkers bool `config:"directory_markers"`
|
ServicePrincipalFile string `config:"service_principal_file"`
|
||||||
NoCheckContainer bool `config:"no_check_container"`
|
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
|
||||||
NoHeadObject bool `config:"no_head_object"`
|
UseMSI bool `config:"use_msi"`
|
||||||
DeleteSnapshots string `config:"delete_snapshots"`
|
MSIObjectID string `config:"msi_object_id"`
|
||||||
|
MSIClientID string `config:"msi_client_id"`
|
||||||
|
MSIResourceID string `config:"msi_mi_res_id"`
|
||||||
|
UseAZ bool `config:"use_az"`
|
||||||
|
Endpoint string `config:"endpoint"`
|
||||||
|
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||||
|
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||||
|
CopyConcurrency int `config:"copy_concurrency"`
|
||||||
|
UseCopyBlob bool `config:"use_copy_blob"`
|
||||||
|
UploadConcurrency int `config:"upload_concurrency"`
|
||||||
|
ListChunkSize uint `config:"list_chunk"`
|
||||||
|
AccessTier string `config:"access_tier"`
|
||||||
|
ArchiveTierDelete bool `config:"archive_tier_delete"`
|
||||||
|
UseEmulator bool `config:"use_emulator"`
|
||||||
|
DisableCheckSum bool `config:"disable_checksum"`
|
||||||
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
|
PublicAccess string `config:"public_access"`
|
||||||
|
DirectoryMarkers bool `config:"directory_markers"`
|
||||||
|
NoCheckContainer bool `config:"no_check_container"`
|
||||||
|
NoHeadObject bool `config:"no_head_object"`
|
||||||
|
DeleteSnapshots string `config:"delete_snapshots"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote azure server
|
// Fs represents a remote azure server
|
||||||
type Fs struct {
|
type Fs struct {
|
||||||
name string // name of this remote
|
name string // name of this remote
|
||||||
root string // the path we are working on if any
|
root string // the path we are working on if any
|
||||||
opt Options // parsed config options
|
opt Options // parsed config options
|
||||||
ci *fs.ConfigInfo // global config
|
ci *fs.ConfigInfo // global config
|
||||||
features *fs.Features // optional features
|
features *fs.Features // optional features
|
||||||
cntSVCcacheMu sync.Mutex // mutex to protect cntSVCcache
|
cntSVCcacheMu sync.Mutex // mutex to protect cntSVCcache
|
||||||
cntSVCcache map[string]*container.Client // reference to containerClient per container
|
cntSVCcache map[string]*container.Client // reference to containerClient per container
|
||||||
svc *service.Client // client to access azblob
|
svc *service.Client // client to access azblob
|
||||||
cred azcore.TokenCredential // how to generate tokens (may be nil)
|
cred azcore.TokenCredential // how to generate tokens (may be nil)
|
||||||
usingSharedKeyCred bool // set if using shared key credentials
|
sharedKeyCred *service.SharedKeyCredential // shared key credentials (may be nil)
|
||||||
anonymous bool // if this is anonymous access
|
anonymous bool // if this is anonymous access
|
||||||
rootContainer string // container part of root (if any)
|
rootContainer string // container part of root (if any)
|
||||||
rootDirectory string // directory part of root (if any)
|
rootDirectory string // directory part of root (if any)
|
||||||
isLimited bool // if limited to one container
|
isLimited bool // if limited to one container
|
||||||
cache *bucket.Cache // cache for container creation status
|
cache *bucket.Cache // cache for container creation status
|
||||||
pacer *fs.Pacer // To pace and retry the API calls
|
pacer *fs.Pacer // To pace and retry the API calls
|
||||||
uploadToken *pacer.TokenDispenser // control concurrency
|
uploadToken *pacer.TokenDispenser // control concurrency
|
||||||
publicAccess container.PublicAccessType // Container Public Access Level
|
publicAccess container.PublicAccessType // Container Public Access Level
|
||||||
|
|
||||||
// user delegation cache
|
// user delegation cache
|
||||||
userDelegationMu sync.Mutex
|
userDelegationMu sync.Mutex
|
||||||
@@ -545,12 +767,49 @@ func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type servicePrincipalCredentials struct {
|
||||||
|
AppID string `json:"appId"`
|
||||||
|
Password string `json:"password"`
|
||||||
|
Tenant string `json:"tenant"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseServicePrincipalCredentials unmarshals a service principal credentials JSON file as generated by az cli.
|
||||||
|
func parseServicePrincipalCredentials(ctx context.Context, credentialsData []byte) (*servicePrincipalCredentials, error) {
|
||||||
|
var spCredentials servicePrincipalCredentials
|
||||||
|
if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
|
||||||
|
return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err)
|
||||||
|
}
|
||||||
|
// TODO: support certificate credentials
|
||||||
|
// Validate all fields present
|
||||||
|
if spCredentials.AppID == "" || spCredentials.Password == "" || spCredentials.Tenant == "" {
|
||||||
|
return nil, fmt.Errorf("missing fields in credentials file")
|
||||||
|
}
|
||||||
|
return &spCredentials, nil
|
||||||
|
}
|
||||||
|
|
||||||
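For reference, the az command quoted in the service_principal_file help emits JSON of roughly this shape, which is exactly what the struct above decodes - a sketch with placeholder values and a ctx in scope:

creds := []byte(`{
  "appId":    "00000000-0000-0000-0000-000000000000",
  "password": "<client secret>",
  "tenant":   "11111111-1111-1111-1111-111111111111"
}`)
sp, err := parseServicePrincipalCredentials(ctx, creds)
// sp.AppID, sp.Password and sp.Tenant feed azidentity.NewClientSecretCredential below.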
// setRoot changes the root of the Fs
|
// setRoot changes the root of the Fs
|
||||||
func (f *Fs) setRoot(root string) {
|
func (f *Fs) setRoot(root string) {
|
||||||
f.root = parsePath(root)
|
f.root = parsePath(root)
|
||||||
f.rootContainer, f.rootDirectory = bucket.Split(f.root)
|
f.rootContainer, f.rootDirectory = bucket.Split(f.root)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Wrap the http.Transport to satisfy the Transporter interface
|
||||||
|
type transporter struct {
|
||||||
|
http.RoundTripper
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make a new transporter
|
||||||
|
func newTransporter(ctx context.Context) transporter {
|
||||||
|
return transporter{
|
||||||
|
RoundTripper: fshttp.NewTransport(ctx),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do sends the HTTP request and returns the HTTP response or error.
|
||||||
|
func (tr transporter) Do(req *http.Request) (*http.Response, error) {
|
||||||
|
return tr.RoundTripper.RoundTrip(req)
|
||||||
|
}
|
||||||
|
|
||||||
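The transporter wrapper exists because the Azure SDK's policy.Transporter interface wants Do rather than http.RoundTripper's RoundTrip. A compile-time assertion makes the relationship explicit - a sketch, assuming the policy package imported above:

// transporter satisfies policy.Transporter by forwarding Do to RoundTrip,
// which is how rclone injects its fshttp transport into the SDK pipeline.
var _ policy.Transporter = transporter{}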
// NewFs constructs an Fs from the path, container:path
|
// NewFs constructs an Fs from the path, container:path
|
||||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
@@ -610,32 +869,255 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
fs.Debugf(f, "Using directory markers")
|
fs.Debugf(f, "Using directory markers")
|
||||||
}
|
}
|
||||||
|
|
||||||
conf := auth.NewClientOpts[service.Client, service.ClientOptions, service.SharedKeyCredential]{
|
// Client options specifying our own transport
|
||||||
DefaultBaseURL: storageDefaultBaseURL,
|
policyClientOptions := policy.ClientOptions{
|
||||||
RootContainer: f.rootContainer,
|
Transport: newTransporter(ctx),
|
||||||
Blob: true,
|
|
||||||
NewClient: service.NewClient,
|
|
||||||
NewClientFromConnectionString: service.NewClientFromConnectionString,
|
|
||||||
NewClientWithNoCredential: service.NewClientWithNoCredential,
|
|
||||||
NewClientWithSharedKeyCredential: service.NewClientWithSharedKeyCredential,
|
|
||||||
NewSharedKeyCredential: service.NewSharedKeyCredential,
|
|
||||||
SetClientOptions: func(options *service.ClientOptions, policyClientOptions policy.ClientOptions) {
|
|
||||||
options.ClientOptions = policyClientOptions
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
res, err := auth.NewClient(ctx, conf, &opt.Options)
|
clientOpt := service.ClientOptions{
|
||||||
if err != nil {
|
ClientOptions: policyClientOptions,
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
f.svc = res.Client
|
|
||||||
f.cred = res.Cred
|
|
||||||
f.usingSharedKeyCred = res.UsingSharedKeyCred
|
|
||||||
f.anonymous = res.Anonymous
|
|
||||||
|
|
||||||
// if using Container level SAS put the container client into the cache
|
// Here we auth by setting one of f.cred, f.sharedKeyCred, f.svc or f.anonymous
|
||||||
if opt.SASURL != "" && res.Container != "" {
|
switch {
|
||||||
_ = f.cntSVC(res.Container)
|
case opt.EnvAuth:
|
||||||
f.isLimited = true
|
// Read account from environment if needed
|
||||||
|
if opt.Account == "" {
|
||||||
|
opt.Account, _ = os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
|
||||||
|
}
|
||||||
|
// Read credentials from the environment
|
||||||
|
options := azidentity.DefaultAzureCredentialOptions{
|
||||||
|
ClientOptions: policyClientOptions,
|
||||||
|
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
|
||||||
|
}
|
||||||
|
f.cred, err = azidentity.NewDefaultAzureCredential(&options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("create azure environment credential failed: %w", err)
|
||||||
|
}
|
||||||
|
case opt.UseEmulator:
|
||||||
|
if opt.Account == "" {
|
||||||
|
opt.Account = emulatorAccount
|
||||||
|
}
|
||||||
|
if opt.Key == "" {
|
||||||
|
opt.Key = emulatorAccountKey
|
||||||
|
}
|
||||||
|
if opt.Endpoint == "" {
|
||||||
|
opt.Endpoint = emulatorBlobEndpoint
|
||||||
|
}
|
||||||
|
f.sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("create new shared key credential for emulator failed: %w", err)
|
||||||
|
}
|
||||||
|
case opt.Account != "" && opt.Key != "":
|
||||||
|
f.sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
|
||||||
|
}
|
||||||
|
case opt.SASURL != "":
|
||||||
|
parts, err := sas.ParseURL(opt.SASURL)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse SAS URL: %w", err)
|
||||||
|
}
|
||||||
|
endpoint := opt.SASURL
|
||||||
|
containerName := parts.ContainerName
|
||||||
|
// Check if we have container level SAS or account level SAS
|
||||||
|
if containerName != "" {
|
||||||
|
// Container level SAS
|
||||||
|
if f.rootContainer != "" && containerName != f.rootContainer {
|
||||||
|
return nil, fmt.Errorf("container name in SAS URL (%q) and container provided in command (%q) do not match", containerName, f.rootContainer)
|
||||||
|
}
|
||||||
|
// Rewrite the endpoint string to be without the container
|
||||||
|
parts.ContainerName = ""
|
||||||
|
endpoint = parts.String()
|
||||||
|
}
|
||||||
|
f.svc, err = service.NewClientWithNoCredential(endpoint, &clientOpt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to create SAS URL client: %w", err)
|
||||||
|
}
|
||||||
|
// if using Container level SAS put the container client into the cache
|
||||||
|
if containerName != "" {
|
||||||
|
_ = f.cntSVC(containerName)
|
||||||
|
f.isLimited = true
|
||||||
|
}
|
||||||
|
case opt.ClientID != "" && opt.Tenant != "" && opt.ClientSecret != "":
|
||||||
|
// Service principal with client secret
|
||||||
|
options := azidentity.ClientSecretCredentialOptions{
|
||||||
|
ClientOptions: policyClientOptions,
|
||||||
|
}
|
||||||
|
f.cred, err = azidentity.NewClientSecretCredential(opt.Tenant, opt.ClientID, opt.ClientSecret, &options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error creating a client secret credential: %w", err)
|
||||||
|
}
|
||||||
|
case opt.ClientID != "" && opt.Tenant != "" && opt.ClientCertificatePath != "":
|
||||||
|
// Service principal with certificate
|
||||||
|
//
|
||||||
|
// Read the certificate
|
||||||
|
data, err := os.ReadFile(env.ShellExpand(opt.ClientCertificatePath))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error reading client certificate file: %w", err)
|
||||||
|
}
|
||||||
|
// NewClientCertificateCredential requires at least one *x509.Certificate, and a
|
||||||
|
// crypto.PrivateKey.
|
||||||
|
//
|
||||||
|
// ParseCertificates returns these given certificate data in PEM or PKCS12 format.
|
||||||
|
// It handles common scenarios but has limitations, for example it doesn't load PEM
|
||||||
|
// encrypted private keys.
|
||||||
|
var password []byte
|
||||||
|
if opt.ClientCertificatePassword != "" {
|
||||||
|
pw, err := obscure.Reveal(opt.ClientCertificatePassword)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("certificate password decode failed - did you obscure it?: %w", err)
|
||||||
|
}
|
||||||
|
password = []byte(pw)
|
||||||
|
}
|
||||||
|
certs, key, err := azidentity.ParseCertificates(data, password)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse client certificate file: %w", err)
|
||||||
|
}
|
||||||
|
options := azidentity.ClientCertificateCredentialOptions{
|
||||||
|
ClientOptions: policyClientOptions,
|
||||||
|
SendCertificateChain: opt.ClientSendCertificateChain,
|
||||||
|
}
|
||||||
|
f.cred, err = azidentity.NewClientCertificateCredential(
|
||||||
|
opt.Tenant, opt.ClientID, certs, key, &options,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("create azure service principal with client certificate credential failed: %w", err)
|
||||||
|
}
|
||||||
|
case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
|
||||||
|
// User with username and password
|
||||||
|
//nolint:staticcheck // this is deprecated due to Azure policy
|
||||||
|
options := azidentity.UsernamePasswordCredentialOptions{
|
||||||
|
ClientOptions: policyClientOptions,
|
||||||
|
}
|
||||||
|
password, err := obscure.Reveal(opt.Password)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("user password decode failed - did you obscure it?: %w", err)
|
||||||
|
}
|
||||||
|
f.cred, err = azidentity.NewUsernamePasswordCredential(
|
||||||
|
opt.Tenant, opt.ClientID, opt.Username, password, &options,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("authenticate user with password failed: %w", err)
|
||||||
|
}
|
||||||
|
case opt.ServicePrincipalFile != "":
|
||||||
|
// Loading service principal credentials from file.
|
||||||
|
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
|
||||||
|
}
|
||||||
|
parsedCreds, err := parseServicePrincipalCredentials(ctx, loadedCreds)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error parsing service principal credentials file: %w", err)
|
||||||
|
}
|
||||||
|
options := azidentity.ClientSecretCredentialOptions{
|
||||||
|
ClientOptions: policyClientOptions,
|
||||||
|
}
|
||||||
|
f.cred, err = azidentity.NewClientSecretCredential(parsedCreds.Tenant, parsedCreds.AppID, parsedCreds.Password, &options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error creating a client secret credential: %w", err)
|
||||||
|
}
|
||||||
|
case opt.UseMSI:
|
||||||
|
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
|
||||||
|
// Validate and ensure exactly one is set. (To do: better validation.)
|
||||||
|
var b2i = map[bool]int{false: 0, true: 1}
|
||||||
|
set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""]
|
||||||
|
if set > 1 {
|
||||||
|
return nil, errors.New("more than one user-assigned identity ID is set")
|
||||||
|
}
|
||||||
|
var options azidentity.ManagedIdentityCredentialOptions
|
||||||
|
switch {
|
||||||
|
case opt.MSIClientID != "":
|
||||||
|
options.ID = azidentity.ClientID(opt.MSIClientID)
|
||||||
|
case opt.MSIObjectID != "":
|
||||||
|
// FIXME this doesn't appear to be in the new SDK?
|
||||||
|
return nil, fmt.Errorf("MSI object ID is currently unsupported")
|
||||||
|
case opt.MSIResourceID != "":
|
||||||
|
options.ID = azidentity.ResourceID(opt.MSIResourceID)
|
||||||
|
}
|
||||||
|
f.cred, err = azidentity.NewManagedIdentityCredential(&options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
|
||||||
|
}
|
||||||
|
case opt.ClientID != "" && opt.Tenant != "" && opt.MSIClientID != "":
|
||||||
|
// Workload Identity based authentication
|
||||||
|
var options azidentity.ManagedIdentityCredentialOptions
|
||||||
|
options.ID = azidentity.ClientID(opt.MSIClientID)
|
||||||
|
|
||||||
|
msiCred, err := azidentity.NewManagedIdentityCredential(&options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
getClientAssertions := func(context.Context) (string, error) {
|
||||||
|
token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
|
||||||
|
Scopes: []string{"api://AzureADTokenExchange"},
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("failed to acquire MSI token: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return token.Token, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
assertOpts := &azidentity.ClientAssertionCredentialOptions{}
|
||||||
|
f.cred, err = azidentity.NewClientAssertionCredential(
|
||||||
|
opt.Tenant,
|
||||||
|
opt.ClientID,
|
||||||
|
getClientAssertions,
|
||||||
|
assertOpts)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to acquire client assertion token: %w", err)
|
||||||
|
}
|
||||||
|
case opt.UseAZ:
|
||||||
|
var options = azidentity.AzureCLICredentialOptions{}
|
||||||
|
f.cred, err = azidentity.NewAzureCLICredential(&options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
|
||||||
|
}
|
||||||
|
case opt.Account != "":
|
||||||
|
// Anonymous access
|
||||||
|
f.anonymous = true
|
||||||
|
default:
|
||||||
|
return nil, errors.New("no authentication method configured")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make the client if not already created
|
||||||
|
if f.svc == nil {
|
||||||
|
// Work out what the endpoint is if it is still unset
|
||||||
|
if opt.Endpoint == "" {
|
||||||
|
if opt.Account == "" {
|
||||||
|
return nil, fmt.Errorf("account must be set: can't make service URL")
|
||||||
|
}
|
||||||
|
u, err := url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, storageDefaultBaseURL))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to make azure storage URL from account: %w", err)
|
||||||
|
}
|
||||||
|
opt.Endpoint = u.String()
|
||||||
|
}
|
||||||
|
if f.sharedKeyCred != nil {
|
||||||
|
// Shared key cred
|
||||||
|
f.svc, err = service.NewClientWithSharedKeyCredential(opt.Endpoint, f.sharedKeyCred, &clientOpt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("create client with shared key failed: %w", err)
|
||||||
|
}
|
||||||
|
} else if f.cred != nil {
|
||||||
|
// Azidentity cred
|
||||||
|
f.svc, err = service.NewClient(opt.Endpoint, f.cred, &clientOpt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("create client failed: %w", err)
|
||||||
|
}
|
||||||
|
} else if f.anonymous {
|
||||||
|
// Anonymous public access
|
||||||
|
f.svc, err = service.NewClientWithNoCredential(opt.Endpoint, &clientOpt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("create public client failed: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if f.svc == nil {
|
||||||
|
return nil, fmt.Errorf("internal error: auth failed to make credentials or client")
|
||||||
}
|
}
|
||||||
|
|
||||||
if f.rootContainer != "" && f.rootDirectory != "" {
|
if f.rootContainer != "" && f.rootDirectory != "" {
|
||||||
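One subtlety in the SAS branch of the auth switch above: a container-level SAS URL carries the container name in its path, and that name must be stripped before the URL is handed to the account-level service client. A sketch of the round trip, assuming the azblob sas package already imported for sas.ParseURL:

parts, err := sas.ParseURL("https://acct.blob.core.windows.net/mycontainer?sv=...&sig=...")
if err != nil {
	return nil, err
}
// parts.ContainerName == "mycontainer" marks a container-level SAS.
parts.ContainerName = ""
endpoint := parts.String() // account-level URL with the query string preserved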
@@ -730,8 +1212,8 @@ func parseXMsTags(s string) (map[string]string, error) {
|
|||||||
return map[string]string{}, nil
|
return map[string]string{}, nil
|
||||||
}
|
}
|
||||||
out := make(map[string]string)
|
out := make(map[string]string)
|
||||||
parts := strings.SplitSeq(s, ",")
|
parts := strings.Split(s, ",")
|
||||||
for p := range parts {
|
for _, p := range parts {
|
||||||
p = strings.TrimSpace(p)
|
p = strings.TrimSpace(p)
|
||||||
if p == "" {
|
if p == "" {
|
||||||
continue
|
continue
|
||||||
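For context on this parseXMsTags hunk: strings.SplitSeq (added in Go 1.24) yields substrings lazily as an iterator, so ranging over it avoids allocating the slice that strings.Split returns; the two loop forms are otherwise equivalent. A sketch:

// Iterator form (master): no intermediate slice.
for p := range strings.SplitSeq("a=1,b=2", ",") {
	fmt.Println(strings.TrimSpace(p))
}
// Slice form (this branch): same output.
for _, p := range strings.Split("a=1,b=2", ",") {
	fmt.Println(strings.TrimSpace(p))
}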
@@ -894,7 +1376,9 @@ func assembleCopyParams(ctx context.Context, f *Fs, src fs.Object, srcProps *blo
|
|||||||
if meta == nil {
|
if meta == nil {
|
||||||
meta = make(map[string]*string, len(userMeta))
|
meta = make(map[string]*string, len(userMeta))
|
||||||
}
|
}
|
||||||
maps.Copy(meta, userMeta)
|
for k, v := range userMeta {
|
||||||
|
meta[k] = v
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// Apply tags if any
|
// Apply tags if any
|
||||||
if len(mappedTags) > 0 {
|
if len(mappedTags) > 0 {
|
||||||
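This hunk, and the two matching ones below, swap maps.Copy for an explicit loop. The two are equivalent: maps.Copy from the standard maps package (Go 1.21) inserts every key/value pair of src into dst, overwriting existing keys. A sketch:

dst := map[string]string{"a": "1"}
src := map[string]string{"a": "x", "b": "2"}
maps.Copy(dst, src) // dst is now {"a":"x", "b":"2"}
// Identical effect with the explicit loop used in this branch:
for k, v := range src {
	dst[k] = v
}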
@@ -991,7 +1475,9 @@ func (o *Object) applyMappedMetadata(ctx context.Context, src fs.ObjectInfo, ui
|
|||||||
if o.tags == nil {
|
if o.tags == nil {
|
||||||
o.tags = make(map[string]string, len(tags))
|
o.tags = make(map[string]string, len(tags))
|
||||||
}
|
}
|
||||||
maps.Copy(o.tags, tags)
|
for k, v := range tags {
|
||||||
|
o.tags[k] = v
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if mappedModTime != nil {
|
if mappedModTime != nil {
|
||||||
@@ -1683,7 +2169,7 @@ func (o *Object) getAuth(ctx context.Context, noAuth bool) (srcURL string, err e
|
|||||||
|
|
||||||
// Append the SAS to the URL
|
// Append the SAS to the URL
|
||||||
srcURL = srcBlobSVC.URL() + "?" + queryParameters.Encode()
|
srcURL = srcBlobSVC.URL() + "?" + queryParameters.Encode()
|
||||||
case f.usingSharedKeyCred:
|
case f.sharedKeyCred != nil:
|
||||||
// Generate a short lived SAS URL if using shared key credentials
|
// Generate a short lived SAS URL if using shared key credentials
|
||||||
expiry := time.Now().Add(sasCopyValidity)
|
expiry := time.Now().Add(sasCopyValidity)
|
||||||
sasOptions := blob.GetSASURLOptions{}
|
sasOptions := blob.GetSASURLOptions{}
|
||||||
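The shared-key branch above mints a short-lived SAS so the copy source can be read server side without exposing the account key. A minimal sketch of that call, assuming srcBlobSVC is a blob.Client built with shared key credentials and that read permission is enough for a copy source:

expiry := time.Now().Add(sasCopyValidity)
perms := sas.BlobPermissions{Read: true}
srcURL, err := srcBlobSVC.GetSASURL(perms, expiry, &blob.GetSASURLOptions{})
if err != nil {
	return "", err
}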
@@ -1856,7 +2342,9 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
|
|||||||
// Apply tags and post-copy headers only when mapping requested changes
|
// Apply tags and post-copy headers only when mapping requested changes
|
||||||
if len(tags) > 0 {
|
if len(tags) > 0 {
|
||||||
options.BlobTags = make(map[string]string, len(tags))
|
options.BlobTags = make(map[string]string, len(tags))
|
||||||
maps.Copy(options.BlobTags, tags)
|
for k, v := range tags {
|
||||||
|
options.BlobTags[k] = v
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if hadMapping {
|
if hadMapping {
|
||||||
// Only set metadata explicitly when mapping was requested; otherwise
|
// Only set metadata explicitly when mapping was requested; otherwise
|
||||||
@@ -2057,7 +2545,9 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
|||||||
|
|
||||||
// Merge user metadata (already lower-cased keys)
|
// Merge user metadata (already lower-cased keys)
|
||||||
metadataMu.Lock()
|
metadataMu.Lock()
|
||||||
maps.Copy(m, o.meta)
|
for k, v := range o.meta {
|
||||||
|
m[k] = v
|
||||||
|
}
|
||||||
metadataMu.Unlock()
|
metadataMu.Unlock()
|
||||||
|
|
||||||
return m, nil
|
return m, nil
|
||||||
|
|||||||
@@ -29,28 +29,36 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"crypto/md5"
|
"crypto/md5"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"slices"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
|
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
|
||||||
|
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share"
|
"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share"
|
||||||
"github.com/rclone/rclone/backend/azureblob/auth"
|
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/list"
|
"github.com/rclone/rclone/fs/list"
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
|
"github.com/rclone/rclone/lib/env"
|
||||||
"github.com/rclone/rclone/lib/readers"
|
"github.com/rclone/rclone/lib/readers"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -65,12 +73,199 @@ func init() {
|
|||||||
Name: "azurefiles",
|
Name: "azurefiles",
|
||||||
Description: "Microsoft Azure Files",
|
Description: "Microsoft Azure Files",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: slices.Concat(auth.ConfigOptions, []fs.Option{{
|
Options: []fs.Option{{
|
||||||
|
Name: "account",
|
||||||
|
Help: `Azure Storage Account Name.
|
||||||
|
|
||||||
|
Set this to the Azure Storage Account Name in use.
|
||||||
|
|
||||||
|
Leave blank to use SAS URL or connection string, otherwise it needs to be set.
|
||||||
|
|
||||||
|
If this is blank and if env_auth is set it will be read from the
|
||||||
|
environment variable ` + "`AZURE_STORAGE_ACCOUNT_NAME`" + ` if possible.
|
||||||
|
`,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
Name: "share_name",
|
Name: "share_name",
|
||||||
Help: `Azure Files Share Name.
|
Help: `Azure Files Share Name.
|
||||||
|
|
||||||
This is required and is the name of the share to access.
|
This is required and is the name of the share to access.
|
||||||
`,
|
`,
|
||||||
|
}, {
|
||||||
|
Name: "env_auth",
|
||||||
|
Help: `Read credentials from runtime (environment variables, CLI or MSI).
|
||||||
|
|
||||||
|
See the [authentication docs](/azurefiles#authentication) for full info.`,
|
||||||
|
Default: false,
|
||||||
|
}, {
|
||||||
|
Name: "key",
|
||||||
|
Help: `Storage Account Shared Key.
|
||||||
|
|
||||||
|
Leave blank to use SAS URL or connection string.`,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "sas_url",
|
||||||
|
Help: `SAS URL.
|
||||||
|
|
||||||
|
Leave blank if using account/key or connection string.`,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "connection_string",
|
||||||
|
Help: `Azure Files Connection String.`,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "tenant",
|
||||||
|
Help: `ID of the service principal's tenant. Also called its directory ID.
|
||||||
|
|
||||||
|
Set this if using
|
||||||
|
- Service principal with client secret
|
||||||
|
- Service principal with certificate
|
||||||
|
- User with username and password
|
||||||
|
`,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "client_id",
|
||||||
|
Help: `The ID of the client in use.
|
||||||
|
|
||||||
|
Set this if using
|
||||||
|
- Service principal with client secret
|
||||||
|
- Service principal with certificate
|
||||||
|
- User with username and password
|
||||||
|
`,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "client_secret",
|
||||||
|
Help: `One of the service principal's client secrets.
|
||||||
|
|
||||||
|
Set this if using
|
||||||
|
- Service principal with client secret
|
||||||
|
`,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "client_certificate_path",
|
||||||
|
Help: `Path to a PEM or PKCS12 certificate file including the private key.
|
||||||
|
|
||||||
|
Set this if using
|
||||||
|
- Service principal with certificate
|
||||||
|
`,
|
||||||
|
}, {
|
||||||
|
Name: "client_certificate_password",
|
||||||
|
Help: `Password for the certificate file (optional).
|
||||||
|
|
||||||
|
Optionally set this if using
|
||||||
|
- Service principal with certificate
|
||||||
|
|
||||||
|
And the certificate has a password.
|
||||||
|
`,
|
||||||
|
IsPassword: true,
|
||||||
|
}, {
|
||||||
|
Name: "client_send_certificate_chain",
|
||||||
|
Help: `Send the certificate chain when using certificate auth.
|
||||||
|
|
||||||
|
Specifies whether an authentication request will include an x5c header
|
||||||
|
to support subject name / issuer based authentication. When set to
|
||||||
|
true, authentication requests include the x5c header.
|
||||||
|
|
||||||
|
Optionally set this if using
|
||||||
|
- Service principal with certificate
|
||||||
|
`,
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "username",
|
||||||
|
Help: `User name (usually an email address).
|
||||||
|
|
||||||
|
Set this if using
|
||||||
|
- User with username and password
|
||||||
|
`,
|
||||||
|
Advanced: true,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "password",
|
||||||
|
Help: `The user's password.
|
||||||
|
|
||||||
|
Set this if using
|
||||||
|
- User with username and password
|
||||||
|
`,
|
||||||
|
IsPassword: true,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "service_principal_file",
|
||||||
|
Help: `Path to file containing credentials for use with a service principal.
|
||||||
|
|
||||||
|
Leave blank normally. Needed only if you want to use a service principal instead of interactive login.
|
||||||
|
|
||||||
|
$ az ad sp create-for-rbac --name "<name>" \
|
||||||
|
--role "Storage Files Data Owner" \
|
||||||
|
--scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
|
||||||
|
> azure-principal.json
|
||||||
|
|
||||||
|
See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to files data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.
|
||||||
|
|
||||||
|
**NB** this section needs updating for Azure Files - pull requests appreciated!
|
||||||
|
|
||||||
|
It may be more convenient to put the credentials directly into the
|
||||||
|
rclone config file under the ` + "`client_id`, `tenant` and `client_secret`" + `
|
||||||
|
keys instead of setting ` + "`service_principal_file`" + `.
|
||||||
|
`,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "use_msi",
|
||||||
|
Help: `Use a managed service identity to authenticate (only works in Azure).
|
||||||
|
|
||||||
|
When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
|
||||||
|
to authenticate to Azure Storage instead of a SAS token or account key.
|
||||||
|
|
||||||
|
If the VM(SS) on which this program is running has a system-assigned identity, it will
|
||||||
|
be used by default. If the resource has no system-assigned but exactly one user-assigned identity,
|
||||||
|
the user-assigned identity will be used by default. If the resource has multiple user-assigned
|
||||||
|
identities, the identity to use must be explicitly specified using exactly one of the msi_object_id,
|
||||||
|
msi_client_id, or msi_mi_res_id parameters.`,
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "msi_object_id",
|
||||||
|
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
|
||||||
|
Advanced: true,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "msi_client_id",
|
||||||
|
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
|
||||||
|
Advanced: true,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "msi_mi_res_id",
|
||||||
|
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
|
||||||
|
Advanced: true,
|
||||||
|
Sensitive: true,
|
||||||
|
}, {
|
||||||
|
Name: "disable_instance_discovery",
|
||||||
|
Help: `Skip requesting Microsoft Entra instance metadata.
|
||||||
|
This should be set true only by applications authenticating in
|
||||||
|
disconnected clouds, or private clouds such as Azure Stack.
|
||||||
|
It determines whether rclone requests Microsoft Entra instance
|
||||||
|
metadata from ` + "`https://login.microsoft.com/`" + ` before
|
||||||
|
authenticating.
|
||||||
|
Setting this to true will skip this request, making you responsible
|
||||||
|
for ensuring the configured authority is valid and trustworthy.
|
||||||
|
`,
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "use_az",
|
||||||
|
Help: `Use the Azure CLI tool az for authentication.
|
||||||
|
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
|
||||||
|
as the sole means of authentication.
|
||||||
|
Setting this can be useful if you wish to use the az CLI on a host with
|
||||||
|
a System Managed Identity that you do not want to use.
|
||||||
|
Don't set env_auth at the same time.
|
||||||
|
`,
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "endpoint",
|
||||||
|
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
||||||
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "chunk_size",
|
Name: "chunk_size",
|
||||||
Help: `Upload chunk size.
|
Help: `Upload chunk size.
|
||||||
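Unlike the blob backend, azurefiles also accepts a connection_string, and the standard Azure storage connection string format applies. A sketch with placeholder values, using the azfile service constructor shown later in this diff (nil options for brevity; the real code passes its clientOpt):

connStr := "DefaultEndpointsProtocol=https;AccountName=myacct;AccountKey=<base64-key>;EndpointSuffix=core.windows.net"
client, err := service.NewClientFromConnectionString(connStr, nil)
if err != nil {
	return nil, err
}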
@@ -128,18 +323,38 @@ You will need this much free space in the share as the file will be this size te
|
|||||||
encoder.EncodeInvalidUtf8 |
|
encoder.EncodeInvalidUtf8 |
|
||||||
encoder.EncodeCtl | encoder.EncodeDel |
|
encoder.EncodeCtl | encoder.EncodeDel |
|
||||||
encoder.EncodeDot | encoder.EncodeRightPeriod),
|
encoder.EncodeDot | encoder.EncodeRightPeriod),
|
||||||
}}),
|
}},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
// Options defines the configuration for this backend
|
||||||
type Options struct {
|
type Options struct {
|
||||||
auth.Options
|
Account string `config:"account"`
|
||||||
ShareName string `config:"share_name"`
|
ShareName string `config:"share_name"`
|
||||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
EnvAuth bool `config:"env_auth"`
|
||||||
MaxStreamSize fs.SizeSuffix `config:"max_stream_size"`
|
Key string `config:"key"`
|
||||||
UploadConcurrency int `config:"upload_concurrency"`
|
SASURL string `config:"sas_url"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
ConnectionString string `config:"connection_string"`
|
||||||
|
Tenant string `config:"tenant"`
|
||||||
|
ClientID string `config:"client_id"`
|
||||||
|
ClientSecret string `config:"client_secret"`
|
||||||
|
ClientCertificatePath string `config:"client_certificate_path"`
|
||||||
|
ClientCertificatePassword string `config:"client_certificate_password"`
|
||||||
|
ClientSendCertificateChain bool `config:"client_send_certificate_chain"`
|
||||||
|
Username string `config:"username"`
|
||||||
|
Password string `config:"password"`
|
||||||
|
ServicePrincipalFile string `config:"service_principal_file"`
|
||||||
|
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
|
||||||
|
UseMSI bool `config:"use_msi"`
|
||||||
|
MSIObjectID string `config:"msi_object_id"`
|
||||||
|
MSIClientID string `config:"msi_client_id"`
|
||||||
|
MSIResourceID string `config:"msi_mi_res_id"`
|
||||||
|
UseAZ bool `config:"use_az"`
|
||||||
|
Endpoint string `config:"endpoint"`
|
||||||
|
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||||
|
MaxStreamSize fs.SizeSuffix `config:"max_stream_size"`
|
||||||
|
UploadConcurrency int `config:"upload_concurrency"`
|
||||||
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a root directory inside a share. The root directory can be ""
|
// Fs represents a root directory inside a share. The root directory can be ""
|
||||||
@@ -162,29 +377,266 @@ type Object struct {
|
|||||||
contentType string // content type if known
|
contentType string // content type if known
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Wrap the http.Transport to satisfy the Transporter interface
|
||||||
|
type transporter struct {
|
||||||
|
http.RoundTripper
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make a new transporter
|
||||||
|
func newTransporter(ctx context.Context) transporter {
|
||||||
|
return transporter{
|
||||||
|
RoundTripper: fshttp.NewTransport(ctx),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Do sends the HTTP request and returns the HTTP response or error.
|
||||||
|
func (tr transporter) Do(req *http.Request) (*http.Response, error) {
|
||||||
|
return tr.RoundTripper.RoundTrip(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
type servicePrincipalCredentials struct {
|
||||||
|
AppID string `json:"appId"`
|
||||||
|
Password string `json:"password"`
|
||||||
|
Tenant string `json:"tenant"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseServicePrincipalCredentials unmarshals a service principal credentials JSON file as generated by az cli.
|
||||||
|
func parseServicePrincipalCredentials(ctx context.Context, credentialsData []byte) (*servicePrincipalCredentials, error) {
|
||||||
|
var spCredentials servicePrincipalCredentials
|
||||||
|
if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
|
||||||
|
return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err)
|
||||||
|
}
|
||||||
|
// TODO: support certificate credentials
|
||||||
|
// Validate all fields present
|
||||||
|
if spCredentials.AppID == "" || spCredentials.Password == "" || spCredentials.Tenant == "" {
|
||||||
|
return nil, fmt.Errorf("missing fields in credentials file")
|
||||||
|
}
|
||||||
|
return &spCredentials, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Factored out from NewFs so that it can be tested with opt *Options and without m configmap.Mapper
|
// Factored out from NewFs so that it can be tested with opt *Options and without m configmap.Mapper
|
||||||
func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.Fs, error) {
|
func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.Fs, error) {
|
||||||
conf := auth.NewClientOpts[service.Client, service.ClientOptions, service.SharedKeyCredential]{
|
// Client options specifying our own transport
|
||||||
DefaultBaseURL: storageDefaultBaseURL,
|
policyClientOptions := policy.ClientOptions{
|
||||||
NewClient: service.NewClient,
|
Transport: newTransporter(ctx),
|
||||||
NewClientFromConnectionString: service.NewClientFromConnectionString,
|
|
||||||
NewClientWithNoCredential: service.NewClientWithNoCredential,
|
|
||||||
NewClientWithSharedKeyCredential: service.NewClientWithSharedKeyCredential,
|
|
||||||
NewSharedKeyCredential: service.NewSharedKeyCredential,
|
|
||||||
SetClientOptions: func(options *service.ClientOptions, policyClientOptions policy.ClientOptions) {
|
|
||||||
options.ClientOptions = policyClientOptions
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
res, err := auth.NewClient(ctx, conf, &opt.Options)
|
backup := service.ShareTokenIntentBackup
|
||||||
if err != nil {
|
clientOpt := service.ClientOptions{
|
||||||
return nil, err
|
ClientOptions: policyClientOptions,
|
||||||
|
FileRequestIntent: &backup,
|
||||||
}
|
}
|
||||||
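// Note: FileRequestIntent above is what makes token-based credentials work
// against Azure Files at all - it adds the x-ms-file-request-intent: backup
// header that the service requires for OAuth requests. Shared key and SAS
// auth do not need it. (Documented azfile SDK behaviour, noted here as
// context; it is not spelled out elsewhere in this diff.)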
// f.svc = res.Client
|
|
||||||
// f.cred = res.Cred
|
|
||||||
// f.sharedKeyCred = res.SharedKeyCred
|
|
||||||
// f.anonymous = res.Anonymous
|
|
||||||
|
|
||||||
shareClient := res.Client.NewShareClient(opt.ShareName)
|
// Here we auth by setting one of cred, sharedKeyCred or f.client
|
||||||
|
var (
|
||||||
|
cred azcore.TokenCredential
|
||||||
|
sharedKeyCred *service.SharedKeyCredential
|
||||||
|
client *service.Client
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
switch {
|
||||||
|
case opt.EnvAuth:
|
||||||
|
// Read account from environment if needed
|
||||||
|
if opt.Account == "" {
|
||||||
|
opt.Account, _ = os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
|
||||||
|
}
|
||||||
|
// Read credentials from the environment
|
||||||
|
options := azidentity.DefaultAzureCredentialOptions{
|
||||||
|
ClientOptions: policyClientOptions,
|
||||||
|
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
|
||||||
|
}
|
||||||
|
cred, err = azidentity.NewDefaultAzureCredential(&options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("create azure environment credential failed: %w", err)
|
||||||
|
}
|
||||||
|
case opt.Account != "" && opt.Key != "":
|
||||||
|
sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
|
||||||
|
}
|
||||||
|
case opt.UseAZ:
|
||||||
|
options := azidentity.AzureCLICredentialOptions{}
|
||||||
|
cred, err = azidentity.NewAzureCLICredential(&options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
|
||||||
|
}
|
||||||
|
case opt.SASURL != "":
|
||||||
|
client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to create SAS URL client: %w", err)
|
||||||
|
}
|
||||||
|
case opt.ConnectionString != "":
|
||||||
|
client, err = service.NewClientFromConnectionString(opt.ConnectionString, &clientOpt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to create connection string client: %w", err)
|
||||||
|
}
|
||||||
|
case opt.ClientID != "" && opt.Tenant != "" && opt.ClientSecret != "":
|
||||||
|
// Service principal with client secret
|
||||||
|
options := azidentity.ClientSecretCredentialOptions{
|
||||||
|
ClientOptions: policyClientOptions,
|
||||||
|
}
|
||||||
|
cred, err = azidentity.NewClientSecretCredential(opt.Tenant, opt.ClientID, opt.ClientSecret, &options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error creating a client secret credential: %w", err)
|
||||||
|
}
|
||||||
|
case opt.ClientID != "" && opt.Tenant != "" && opt.ClientCertificatePath != "":
|
||||||
|
// Service principal with certificate
|
||||||
|
//
|
||||||
|
// Read the certificate
|
||||||
|
data, err := os.ReadFile(env.ShellExpand(opt.ClientCertificatePath))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error reading client certificate file: %w", err)
|
||||||
|
}
|
||||||
|
// NewClientCertificateCredential requires at least one *x509.Certificate, and a
|
||||||
|
// crypto.PrivateKey.
|
||||||
|
//
|
||||||
|
// ParseCertificates returns these given certificate data in PEM or PKCS12 format.
|
||||||
|
// It handles common scenarios but has limitations, for example it doesn't load PEM
|
||||||
|
// encrypted private keys.
|
||||||
|
var password []byte
|
||||||
|
if opt.ClientCertificatePassword != "" {
|
||||||
|
pw, err := obscure.Reveal(opt.ClientCertificatePassword)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("certificate password decode failed - did you obscure it?: %w", err)
|
||||||
|
}
|
||||||
|
password = []byte(pw)
|
||||||
|
}
|
||||||
|
certs, key, err := azidentity.ParseCertificates(data, password)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse client certificate file: %w", err)
|
||||||
|
}
|
||||||
|
options := azidentity.ClientCertificateCredentialOptions{
|
||||||
|
ClientOptions: policyClientOptions,
|
||||||
|
SendCertificateChain: opt.ClientSendCertificateChain,
|
||||||
|
}
|
||||||
|
cred, err = azidentity.NewClientCertificateCredential(
|
||||||
|
opt.Tenant, opt.ClientID, certs, key, &options,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("create azure service principal with client certificate credential failed: %w", err)
|
||||||
|
}
|
||||||
|
case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
|
||||||
|
// User with username and password
|
||||||
|
//nolint:staticcheck // this is deprecated due to Azure policy
|
||||||
|
options := azidentity.UsernamePasswordCredentialOptions{
|
||||||
|
ClientOptions: policyClientOptions,
|
||||||
|
}
|
||||||
|
password, err := obscure.Reveal(opt.Password)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("user password decode failed - did you obscure it?: %w", err)
|
||||||
|
}
|
||||||
|
cred, err = azidentity.NewUsernamePasswordCredential(
|
||||||
|
opt.Tenant, opt.ClientID, opt.Username, password, &options,
|
||||||
|
)
|
||||||
|
	if err != nil {
		return nil, fmt.Errorf("authenticate user with password failed: %w", err)
	}
case opt.ServicePrincipalFile != "":
	// Loading service principal credentials from file.
	loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
	if err != nil {
		return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
	}
	parsedCreds, err := parseServicePrincipalCredentials(ctx, loadedCreds)
	if err != nil {
		return nil, fmt.Errorf("error parsing service principal credentials file: %w", err)
	}
	options := azidentity.ClientSecretCredentialOptions{
		ClientOptions: policyClientOptions,
	}
	cred, err = azidentity.NewClientSecretCredential(parsedCreds.Tenant, parsedCreds.AppID, parsedCreds.Password, &options)
	if err != nil {
		return nil, fmt.Errorf("error creating a client secret credential: %w", err)
	}
case opt.UseMSI:
	// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
	// Validate and ensure exactly one is set. (To do: better validation.)
	b2i := map[bool]int{false: 0, true: 1}
	set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""]
	if set > 1 {
		return nil, errors.New("more than one user-assigned identity ID is set")
	}
	var options azidentity.ManagedIdentityCredentialOptions
	switch {
	case opt.MSIClientID != "":
		options.ID = azidentity.ClientID(opt.MSIClientID)
	case opt.MSIObjectID != "":
		// FIXME this doesn't appear to be in the new SDK?
		return nil, fmt.Errorf("MSI object ID is currently unsupported")
	case opt.MSIResourceID != "":
		options.ID = azidentity.ResourceID(opt.MSIResourceID)
	}
	cred, err = azidentity.NewManagedIdentityCredential(&options)
	if err != nil {
		return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
	}
case opt.ClientID != "" && opt.Tenant != "" && opt.MSIClientID != "":
	// Workload Identity based authentication
	var options azidentity.ManagedIdentityCredentialOptions
	options.ID = azidentity.ClientID(opt.MSIClientID)

	msiCred, err := azidentity.NewManagedIdentityCredential(&options)
	if err != nil {
		return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
	}

	getClientAssertions := func(context.Context) (string, error) {
		token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
			Scopes: []string{"api://AzureADTokenExchange"},
		})
		if err != nil {
			return "", fmt.Errorf("failed to acquire MSI token: %w", err)
		}
		return token.Token, nil
	}

	assertOpts := &azidentity.ClientAssertionCredentialOptions{}
	cred, err = azidentity.NewClientAssertionCredential(
		opt.Tenant,
		opt.ClientID,
		getClientAssertions,
		assertOpts)
	if err != nil {
		return nil, fmt.Errorf("failed to acquire client assertion token: %w", err)
	}
default:
	return nil, errors.New("no authentication method configured")
}

// Make the client if not already created
if client == nil {
	// Work out what the endpoint is if it is still unset
	if opt.Endpoint == "" {
		if opt.Account == "" {
			return nil, fmt.Errorf("account must be set: can't make service URL")
		}
		u, err := url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, storageDefaultBaseURL))
		if err != nil {
			return nil, fmt.Errorf("failed to make azure storage URL from account: %w", err)
		}
		opt.Endpoint = u.String()
	}
	if sharedKeyCred != nil {
		// Shared key cred
		client, err = service.NewClientWithSharedKeyCredential(opt.Endpoint, sharedKeyCred, &clientOpt)
		if err != nil {
			return nil, fmt.Errorf("create client with shared key failed: %w", err)
		}
	} else if cred != nil {
		// Azidentity cred
		client, err = service.NewClient(opt.Endpoint, cred, &clientOpt)
		if err != nil {
			return nil, fmt.Errorf("create client failed: %w", err)
		}
	}
}
if client == nil {
	return nil, fmt.Errorf("internal error: auth failed to make credentials or client")
}

shareClient := client.NewShareClient(opt.ShareName)
svc := shareClient.NewRootDirectoryClient()
f := &Fs{
	shareClient: shareClient,
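Editor's note: the workload-identity case above chains two credentials — a managed-identity credential fetches a token scoped to api://AzureADTokenExchange, which is then presented to Entra ID as a client assertion. A minimal standalone sketch of the same pattern follows; the tenant and client IDs are placeholders and this is an illustration of the azidentity API, not the backend's actual wiring.

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	ctx := context.Background()

	// Managed identity credential for a user-assigned identity (placeholder ID).
	msiCred, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
		ID: azidentity.ClientID("00000000-0000-0000-0000-000000000000"),
	})
	if err != nil {
		panic(err)
	}

	// The assertion callback exchanges the MSI token for a client assertion.
	getAssertion := func(ctx context.Context) (string, error) {
		tok, err := msiCred.GetToken(ctx, policy.TokenRequestOptions{
			Scopes: []string{"api://AzureADTokenExchange"},
		})
		if err != nil {
			return "", err
		}
		return tok.Token, nil
	}

	cred, err := azidentity.NewClientAssertionCredential("my-tenant-id", "my-client-id", getAssertion, nil)
	if err != nil {
		panic(err)
	}

	// Request a token for an illustrative storage scope.
	tok, err := cred.GetToken(ctx, policy.TokenRequestOptions{Scopes: []string{"https://storage.azure.com/.default"}})
	if err != nil {
		panic(err)
	}
	fmt.Println("token expires:", tok.ExpiresOn)
}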
@@ -8,7 +8,6 @@ import (
 	"strings"
 	"testing"

-	"github.com/rclone/rclone/backend/azureblob/auth"
 	"github.com/rclone/rclone/fstest/fstests"
 	"github.com/stretchr/testify/assert"
 )
@@ -29,28 +28,22 @@ func (f *Fs) InternalTestAuth(t *testing.T) {
 	{
 		name: "ConnectionString",
 		options: &Options{
 			ShareName: shareName,
-			Options: auth.Options{
-				ConnectionString: "",
-			},
+			ConnectionString: "",
 		},
 	},
 	{
 		name: "AccountAndKey",
 		options: &Options{
 			ShareName: shareName,
-			Options: auth.Options{
-				Account: "",
-				Key:     "",
-			},
+			Account: "",
+			Key:     "",
 		}},
 	{
 		name: "SASUrl",
 		options: &Options{
 			ShareName: shareName,
-			Options: auth.Options{
-				SASURL: "",
-			},
+			SASURL: "",
 		}},
 }
@@ -287,13 +287,13 @@ type StartLargeFileRequest struct {

 // StartLargeFileResponse is the response to StartLargeFileRequest
 type StartLargeFileResponse struct {
 	ID          string            `json:"fileId"`      // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
 	Name        string            `json:"fileName"`    // The name of this file, which can be used with b2_download_file_by_name.
 	AccountID   string            `json:"accountId"`   // The identifier for the account.
 	BucketID    string            `json:"bucketId"`    // The unique ID of the bucket.
 	ContentType string            `json:"contentType"` // The MIME type of the file.
 	Info        map[string]string `json:"fileInfo"`    // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
-	UploadTimestamp Timestamp     `json:"uploadTimestamp"`           // This is a UTC time when this file was uploaded.
+	UploadTimestamp Timestamp     `json:"uploadTimestamp,omitempty"` // This is a UTC time when this file was uploaded.
 }

 // GetUploadPartURLRequest is passed to b2_get_upload_part_url
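Editor's note: `omitempty` in the hunk above only changes how the struct is marshalled; decoding a b2_start_large_file response is unaffected either way. A small self-contained sketch, using a made-up response body rather than a real B2 reply:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down mirror of StartLargeFileResponse, for illustration only.
type startLargeFileResponse struct {
	ID     string `json:"fileId"`
	Name   string `json:"fileName"`
	Bucket string `json:"bucketId"`
}

func main() {
	// Hypothetical response body, not captured from a real B2 call.
	body := []byte(`{"fileId":"4_abc","fileName":"big.bin","bucketId":"b1"}`)
	var r startLargeFileResponse
	if err := json.Unmarshal(body, &r); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", r) // {ID:4_abc Name:big.bin Bucket:b1}
}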
@@ -77,7 +77,7 @@ The DOI provider can be set when rclone does not automatically recognize a suppo
 	Name: "doi_resolver_api_url",
 	Help: `The URL of the DOI resolver API to use.

 The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.

 Defaults to "https://doi.org/api".`,
 	Required: false,
@@ -1,247 +0,0 @@
// Package api has type definitions for drime
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
	"encoding/json"
	"fmt"
	"time"
)

// Types of things in Item
const (
	ItemTypeFolder = "folder"
)

// User information
type User struct {
	Email            string      `json:"email"`
	ID               json.Number `json:"id"`
	Avatar           string      `json:"avatar"`
	ModelType        string      `json:"model_type"`
	OwnsEntry        bool        `json:"owns_entry"`
	EntryPermissions []any       `json:"entry_permissions"`
	DisplayName      string      `json:"display_name"`
}

// Permissions for a file
type Permissions struct {
	FilesUpdate   bool `json:"files.update"`
	FilesCreate   bool `json:"files.create"`
	FilesDownload bool `json:"files.download"`
	FilesDelete   bool `json:"files.delete"`
}

// Item describes a folder or a file as returned by /drive/file-entries
type Item struct {
	ID           json.Number `json:"id"`
	Name         string      `json:"name"`
	Description  any         `json:"description"`
	FileName     string      `json:"file_name"`
	Mime         string      `json:"mime"`
	Color        any         `json:"color"`
	Backup       bool        `json:"backup"`
	Tracked      int         `json:"tracked"`
	FileSize     int64       `json:"file_size"`
	UserID       json.Number `json:"user_id"`
	ParentID     json.Number `json:"parent_id"`
	CreatedAt    time.Time   `json:"created_at"`
	UpdatedAt    time.Time   `json:"updated_at"`
	DeletedAt    any         `json:"deleted_at"`
	IsDeleted    int         `json:"is_deleted"`
	Path         string      `json:"path"`
	DiskPrefix   any         `json:"disk_prefix"`
	Type         string      `json:"type"`
	Extension    any         `json:"extension"`
	FileHash     any         `json:"file_hash"`
	Public       bool        `json:"public"`
	Thumbnail    bool        `json:"thumbnail"`
	MuxStatus    any         `json:"mux_status"`
	ThumbnailURL any         `json:"thumbnail_url"`
	WorkspaceID  int         `json:"workspace_id"`
	IsEncrypted  int         `json:"is_encrypted"`
	Iv           any         `json:"iv"`
	VaultID      any         `json:"vault_id"`
	OwnerID      int         `json:"owner_id"`
	Hash         string      `json:"hash"`
	URL          string      `json:"url"`
	Users        []User      `json:"users"`
	Tags         []any       `json:"tags"`
	Permissions  Permissions `json:"permissions"`
}

// Listing response
type Listing struct {
	CurrentPage int    `json:"current_page"`
	Data        []Item `json:"data"`
	From        int    `json:"from"`
	LastPage    int    `json:"last_page"`
	NextPage    int    `json:"next_page"`
	PerPage     int    `json:"per_page"`
	PrevPage    int    `json:"prev_page"`
	To          int    `json:"to"`
	Total       int    `json:"total"`
}

// UploadResponse for a file
type UploadResponse struct {
	Status    string `json:"status"`
	FileEntry Item   `json:"fileEntry"`
}

// CreateFolderRequest for a folder
type CreateFolderRequest struct {
	Name     string      `json:"name"`
	ParentID json.Number `json:"parentId,omitempty"`
}

// CreateFolderResponse for a folder
type CreateFolderResponse struct {
	Status string `json:"status"`
	Folder Item   `json:"folder"`
}

// Error is returned from drime when things go wrong
type Error struct {
	Message string `json:"message"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
	out := fmt.Sprintf("Error %q", e.Message)
	return out
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// DeleteRequest is the input to DELETE /file-entries
type DeleteRequest struct {
	EntryIDs      []string `json:"entryIds"`
	DeleteForever bool     `json:"deleteForever"`
}

// DeleteResponse is the output of DELETE /file-entries
type DeleteResponse struct {
	Status  string            `json:"status"`
	Message string            `json:"message"`
	Errors  map[string]string `json:"errors"`
}

// UpdateItemRequest describes the updates to be done to an item for PUT /file-entries/{id}/
type UpdateItemRequest struct {
	Name        string `json:"name,omitempty"`
	Description string `json:"description,omitempty"`
}

// UpdateItemResponse is returned by PUT /file-entries/{id}/
type UpdateItemResponse struct {
	Status    string `json:"status"`
	FileEntry Item   `json:"fileEntry"`
}

// MoveRequest is the input to /file-entries/move
type MoveRequest struct {
	EntryIDs      []string `json:"entryIds"`
	DestinationID string   `json:"destinationId"`
}

// MoveResponse is returned by POST /file-entries/move
type MoveResponse struct {
	Status  string `json:"status"`
	Entries []Item `json:"entries"`
}

// CopyRequest is the input to /file-entries/duplicate
type CopyRequest struct {
	EntryIDs      []string `json:"entryIds"`
	DestinationID string   `json:"destinationId"`
}

// CopyResponse is returned by POST /file-entries/duplicate
type CopyResponse struct {
	Status  string `json:"status"`
	Entries []Item `json:"entries"`
}

// MultiPartCreateRequest is the input of POST /s3/multipart/create
type MultiPartCreateRequest struct {
	Filename     string      `json:"filename"`
	Mime         string      `json:"mime"`
	Size         int64       `json:"size"`
	Extension    string      `json:"extension"`
	ParentID     json.Number `json:"parent_id"`
	RelativePath string      `json:"relativePath"`
	WorkspaceID  string      `json:"workspaceId,omitempty"`
}

// MultiPartCreateResponse is returned by POST /s3/multipart/create
type MultiPartCreateResponse struct {
	UploadID string `json:"uploadId"`
	Key      string `json:"key"`
}

// CompletedPart is the type for completed parts when making a multipart upload.
type CompletedPart struct {
	ETag       string `json:"ETag"`
	PartNumber int32  `json:"PartNumber"`
}

// MultiPartGetURLsRequest is the input of POST /s3/multipart/batch-sign-part-urls
type MultiPartGetURLsRequest struct {
	UploadID    string `json:"uploadId"`
	Key         string `json:"key"`
	PartNumbers []int  `json:"partNumbers"`
}

// MultiPartGetURLsResponse is the result of POST /s3/multipart/batch-sign-part-urls
type MultiPartGetURLsResponse struct {
	URLs []struct {
		URL        string `json:"url"`
		PartNumber int32  `json:"partNumber"`
	} `json:"urls"`
}

// MultiPartCompleteRequest is the input to POST /s3/multipart/complete
type MultiPartCompleteRequest struct {
	UploadID string          `json:"uploadId"`
	Key      string          `json:"key"`
	Parts    []CompletedPart `json:"parts"`
}

// MultiPartCompleteResponse is the result of POST /s3/multipart/complete
type MultiPartCompleteResponse struct {
	Location string `json:"location"`
}

// MultiPartEntriesRequest is the input to POST /s3/entries
type MultiPartEntriesRequest struct {
	ClientMime      string      `json:"clientMime"`
	ClientName      string      `json:"clientName"`
	Filename        string      `json:"filename"`
	Size            int64       `json:"size"`
	ClientExtension string      `json:"clientExtension"`
	ParentID        json.Number `json:"parent_id"`
	RelativePath    string      `json:"relativePath"`
	WorkspaceID     string      `json:"workspaceId,omitempty"`
}

// MultiPartEntriesResponse is the result of POST /s3/entries
type MultiPartEntriesResponse struct {
	FileEntry Item `json:"fileEntry"`
}

// MultiPartAbort is the input of POST /s3/multipart/abort
type MultiPartAbort struct {
	UploadID string `json:"uploadId"`
	Key      string `json:"key"`
}

// SpaceUsageResponse is returned by GET /user/space-usage
type SpaceUsageResponse struct {
	Used      int64  `json:"used"`
	Available int64  `json:"available"`
	Status    string `json:"status"`
	SEO       any    `json:"seo"`
}
File diff suppressed because it is too large
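Editor's note: the drime MultiPart* types above describe a three-step S3-style upload — create a session, sign per-part URLs, PUT the parts, then complete. A hedged sketch of that flow follows; `call` is an assumed stand-in that POSTs its request as JSON to the drime API and decodes the response, not a real helper from the backend.

// call is a hypothetical helper: POST JSON to path, decode JSON into out.
var call func(ctx context.Context, path string, req, out any) error

// uploadOnePart sketches the flow for a single-part upload.
func uploadOnePart(ctx context.Context, data []byte, name string) error {
	var created MultiPartCreateResponse
	if err := call(ctx, "/s3/multipart/create", MultiPartCreateRequest{
		Filename: name, Size: int64(len(data)),
	}, &created); err != nil {
		return err
	}

	var urls MultiPartGetURLsResponse
	if err := call(ctx, "/s3/multipart/batch-sign-part-urls", MultiPartGetURLsRequest{
		UploadID: created.UploadID, Key: created.Key, PartNumbers: []int{1},
	}, &urls); err != nil {
		return err
	}

	// PUT the part to the signed URL and record its ETag for the completion call.
	req, err := http.NewRequestWithContext(ctx, "PUT", urls.URLs[0].URL, bytes.NewReader(data))
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer func() { _ = resp.Body.Close() }()

	var done MultiPartCompleteResponse
	return call(ctx, "/s3/multipart/complete", MultiPartCompleteRequest{
		UploadID: created.UploadID,
		Key:      created.Key,
		Parts:    []CompletedPart{{ETag: resp.Header.Get("Etag"), PartNumber: 1}},
	}, &done)
}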
@@ -1,33 +0,0 @@
// Drime filesystem interface
package drime

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDrime:",
		NilObject:  (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MinChunkSize: minChunkSize,
		},
	})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadCutoff(cs)
}

var (
	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
)
@@ -733,17 +733,6 @@ two accounts.
 	Advanced: true,
 	Default:  rwOff,
 	Examples: rwExamples,
-}, {
-	Name: "metadata_enforce_expansive_access",
-	Help: `Whether the request should enforce expansive access rules.
-
-From Feb 2026 this flag will be set by default so this flag can be used for
-testing before then.
-
-See: https://developers.google.com/workspace/drive/api/guides/limited-expansive-access
-`,
-	Advanced: true,
-	Default:  false,
 }, {
 	Name: config.ConfigEncoding,
 	Help: config.ConfigEncodingHelp,
@@ -823,7 +812,6 @@ type Options struct {
 	MetadataOwner       rwChoice             `config:"metadata_owner"`
 	MetadataPermissions rwChoice             `config:"metadata_permissions"`
 	MetadataLabels      rwChoice             `config:"metadata_labels"`
-	EnforceExpansiveAccess bool              `config:"metadata_enforce_expansive_access"`
 	Enc                 encoder.MultiEncoder `config:"encoding"`
 	EnvAuth             bool                 `config:"env_auth"`
 }
@@ -3104,7 +3092,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	_, err = f.svc.Permissions.Create(id, permission).
 		Fields("").
 		SupportsAllDrives(true).
-		EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
 		Context(ctx).Do()
 	return f.shouldRetry(ctx, err)
 })
@@ -3409,11 +3396,9 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
 		return nil, fmt.Errorf("failed to find source dir: %w", err)
 	}
 	isDir = true
-} else if do, ok := srcObj.(fs.IDer); ok {
-	// source was a file
-	srcID = do.ID()
 } else {
-	return nil, fmt.Errorf("unknown source object: %T", srcObj)
+	// source was a file
+	srcID = srcObj.(*Object).id
 }
 srcID = actualID(srcID) // link to underlying object not to shortcut
@@ -643,44 +643,6 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
 	assert.Contains(t, subFs.lastQuery, timeQuery)
 }

-// TestIntegration/FsMkdir/FsPutFiles/Internal/SingleQuoteFolder
-func (f *Fs) InternalTestSingleQuoteFolder(t *testing.T) {
-	ctx := context.Background()
-
-	// Test various folder names containing single quotes
-	for _, name := range []string{
-		"'",
-		"''",
-		"'a'",
-		"it's a test",
-	} {
-		t.Run(name, func(t *testing.T) {
-			dir := "singleQuoteTest/" + name
-			err := f.Mkdir(ctx, dir)
-			require.NoError(t, err)
-			defer func() {
-				err := f.Rmdir(ctx, dir)
-				assert.NoError(t, err)
-			}()
-
-			entries, err := f.List(ctx, "singleQuoteTest")
-			require.NoError(t, err)
-
-			found := false
-			for _, entry := range entries {
-				if entry.Remote() == dir {
-					found = true
-					break
-				}
-			}
-			assert.True(t, found, "directory %q not found in listing", name)
-		})
-	}
-
-	err := f.Rmdir(ctx, "singleQuoteTest")
-	assert.NoError(t, err)
-}
-
 func (f *Fs) InternalTest(t *testing.T) {
 	// These tests all depend on each other so run them as nested tests
 	t.Run("DocumentImport", func(t *testing.T) {
@@ -700,7 +662,6 @@ func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
 	t.Run("Query", f.InternalTestQuery)
 	t.Run("AgeQuery", f.InternalTestAgeQuery)
-	t.Run("SingleQuoteFolder", f.InternalTestSingleQuoteFolder)
 	t.Run("ShouldRetry", f.InternalTestShouldRetry)
 }
@@ -149,7 +149,6 @@ func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions [
 	_, err := f.svc.Permissions.Create(info.Id, perm).
 		SupportsAllDrives(true).
 		SendNotificationEmail(false).
-		EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
 		Context(ctx).Do()
 	return f.shouldRetry(ctx, err)
 })
@@ -484,7 +483,6 @@ func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err
 		SupportsAllDrives(true).
 		TransferOwnership(true).
 		// SendNotificationEmail(false). - required apparently!
-		EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
 		Context(ctx).Do()
 	return f.shouldRetry(ctx, err)
 })
@@ -3,19 +3,6 @@ package api

 import "encoding/json"

-// MultipartInitResponse represents the response from multipart/init.
-type MultipartInitResponse struct {
-	Status int    `json:"status"`
-	Msg    string `json:"msg"`
-	Result struct {
-		UploadID   string `json:"upload_id"`
-		SessID     string `json:"sess_id"`
-		Server     string `json:"server"`
-		FolderID   int64  `json:"folder_id"`
-		ObjectPath string `json:"object_path"`
-	} `json:"result"`
-}
-
 // CreateFolderResponse represents the response for creating a folder.
 type CreateFolderResponse struct {
 	Status int `json:"status"`
@@ -21,11 +21,6 @@ import (
 	"github.com/rclone/rclone/lib/rest"
 )

-const (
-	defaultUploadCutoff = fs.SizeSuffix(500 * 1024 * 1024)
-	defaultChunkSize    = fs.SizeSuffix(64 * 1024 * 1024)
-)
-
 // Register the backend with Rclone
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -38,17 +33,6 @@ func init() {
 	Required:  true,
 	Sensitive: true,
 },
-{
-	Name:     "upload_cutoff",
-	Help:     "Cutoff for switching to chunked upload. Any files larger than this will be uploaded in chunks of chunk_size.",
-	Default:  defaultUploadCutoff,
-	Advanced: true,
-}, {
-	Name:     "chunk_size",
-	Help:     "Chunk size to use for uploading. Used for multipart uploads.",
-	Default:  defaultChunkSize,
-	Advanced: true,
-},
 {
 	Name: config.ConfigEncoding,
 	Help: config.ConfigEncodingHelp,
@@ -88,10 +72,8 @@ func init() {

 // Options defines the configuration for the FileLu backend
 type Options struct {
 	Key string               `config:"key"`
 	Enc encoder.MultiEncoder `config:"encoding"`
-	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
-	ChunkSize    fs.SizeSuffix `config:"chunk_size"`
 }

 // Fs represents the FileLu file system
@@ -207,6 +189,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 	return f.deleteFolder(ctx, fullPath)
 }

+// List returns a list of files and folders
 // List returns a list of files and folders for the given directory
 func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
 	// Compose full path for API call
@@ -267,11 +250,23 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {

 // Put uploads a file directly to the destination folder in the FileLu storage system.
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	o := &Object{
-		fs:     f,
-		remote: src.Remote(),
+	if src.Size() == 0 {
+		return nil, fs.ErrorCantUploadEmptyFiles
 	}
-	return o, o.Update(ctx, in, src, options...)
+
+	err := f.uploadFile(ctx, in, src.Remote())
+	if err != nil {
+		return nil, err
+	}
+
+	newObject := &Object{
+		fs:      f,
+		remote:  src.Remote(),
+		size:    src.Size(),
+		modTime: src.ModTime(ctx),
+	}
+	fs.Infof(f, "Put: Successfully uploaded new file %q", src.Remote())
+	return newObject, nil
 }

 // Move moves the file to the specified location
@@ -1,7 +1,9 @@
 package filelu

 import (
+	"bytes"
 	"context"
+	"encoding/json"
 	"fmt"
 	"io"
 	"net/http"
@@ -14,82 +16,40 @@ import (
 	"github.com/rclone/rclone/lib/rest"
 )

-// multipartInit starts a new multipart upload and returns server details.
-func (f *Fs) multipartInit(ctx context.Context, folderPath, filename string) (*api.MultipartInitResponse, error) {
-	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/multipart/init",
-		Parameters: url.Values{
-			"key":         {f.opt.Key},
-			"filename":    {filename},
-			"folder_path": {folderPath},
-		},
-	}
-
-	var result api.MultipartInitResponse
-
-	err := f.pacer.Call(func() (bool, error) {
-		_, err := f.srv.CallJSON(ctx, &opts, nil, &result)
-		return fserrors.ShouldRetry(err), err
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	if result.Status != 200 {
-		return nil, fmt.Errorf("multipart init error: %s", result.Msg)
-	}
-
-	return &result, nil
-}
-
-// completeMultipart finalizes the multipart upload on the file server.
-func (f *Fs) completeMultipart(ctx context.Context, server string, uploadID string, sessID string, objectPath string) error {
-	req, err := http.NewRequestWithContext(ctx, "POST", server, nil)
-	if err != nil {
-		return err
-	}
-
-	req.Header.Set("X-RC-Upload-Id", uploadID)
-	req.Header.Set("X-Sess-ID", sessID)
-	req.Header.Set("X-Object-Path", objectPath)
-
-	resp, err := f.client.Do(req)
-	if err != nil {
-		return err
-	}
-	defer func() { _ = resp.Body.Close() }()
-
-	if resp.StatusCode != 202 {
-		body, _ := io.ReadAll(resp.Body)
-		return fmt.Errorf("completeMultipart failed %d: %s", resp.StatusCode, string(body))
-	}
-	return nil
-}
-
 // createFolder creates a folder at the specified path.
 func (f *Fs) createFolder(ctx context.Context, dirPath string) (*api.CreateFolderResponse, error) {
 	encodedDir := f.fromStandardPath(dirPath)
+	apiURL := fmt.Sprintf("%s/folder/create?folder_path=%s&key=%s",
+		f.endpoint,
+		url.QueryEscape(encodedDir),
+		url.QueryEscape(f.opt.Key), // assuming f.opt.Key is the correct field
+	)

-	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/folder/create",
-		Parameters: url.Values{
-			"folder_path": {encodedDir},
-			"key":         {f.opt.Key},
-		},
+	req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create request: %w", err)
 	}

-	var result api.CreateFolderResponse
-	err := f.pacer.Call(func() (bool, error) {
-		_, err := f.srv.CallJSON(ctx, &opts, nil, &result)
-		return fserrors.ShouldRetry(err), err
+	var resp *http.Response
+	result := api.CreateFolderResponse{}
+	err = f.pacer.Call(func() (bool, error) {
+		var innerErr error
+		resp, innerErr = f.client.Do(req)
+		return fserrors.ShouldRetry(innerErr), innerErr
 	})
 	if err != nil {
 		return nil, fmt.Errorf("request failed: %w", err)
 	}
+	defer func() {
+		if err := resp.Body.Close(); err != nil {
+			fs.Logf(nil, "Failed to close response body: %v", err)
+		}
+	}()
+
+	err = json.NewDecoder(resp.Body).Decode(&result)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding response: %w", err)
+	}
 	if result.Status != 200 {
 		return nil, fmt.Errorf("error: %s", result.Msg)
 	}
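Editor's note: the rewritten functions in this file call a shouldRetryHTTP helper that never appears in the visible hunks. A plausible sketch of such a helper — an assumption, since the backend's real implementation may differ — would retry on rate limiting and server errors:

// Hedged sketch: the backend's actual shouldRetryHTTP may differ.
func shouldRetryHTTP(statusCode int) bool {
	return statusCode == http.StatusTooManyRequests || statusCode >= 500
}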
@@ -101,29 +61,44 @@ func (f *Fs) createFolder(ctx context.Context, dirPath string) (*api.CreateFolde

 // getFolderList List both files and folders in a directory.
 func (f *Fs) getFolderList(ctx context.Context, path string) (*api.FolderListResponse, error) {
 	encodedDir := f.fromStandardPath(path)
+	apiURL := fmt.Sprintf("%s/folder/list?folder_path=%s&key=%s",
+		f.endpoint,
+		url.QueryEscape(encodedDir),
+		url.QueryEscape(f.opt.Key),
+	)

-	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/folder/list",
-		Parameters: url.Values{
-			"folder_path": {encodedDir},
-			"key":         {f.opt.Key},
-		},
-	}
-
-	var response api.FolderListResponse
+	var body []byte

 	err := f.pacer.Call(func() (bool, error) {
-		_, err := f.srv.CallJSON(ctx, &opts, nil, &response)
+		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
+		if err != nil {
+			return false, fmt.Errorf("failed to create request: %w", err)
+		}
+
+		resp, err := f.client.Do(req)
 		if err != nil {
 			return shouldRetry(err), fmt.Errorf("failed to list directory: %w", err)
 		}
-		return false, nil
+		defer func() {
+			if err := resp.Body.Close(); err != nil {
+				fs.Logf(nil, "Failed to close response body: %v", err)
+			}
+		}()
+
+		body, err = io.ReadAll(resp.Body)
+		if err != nil {
+			return false, fmt.Errorf("error reading response body: %w", err)
+		}
+
+		return shouldRetryHTTP(resp.StatusCode), nil
 	})
 	if err != nil {
 		return nil, err
 	}
+
+	var response api.FolderListResponse
+	if err := json.NewDecoder(bytes.NewReader(body)).Decode(&response); err != nil {
+		return nil, fmt.Errorf("error decoding response: %w", err)
+	}
 	if response.Status != 200 {
 		if strings.Contains(response.Msg, "Folder not found") {
 			return nil, fs.ErrorDirNotFound
@@ -140,28 +115,42 @@ func (f *Fs) getFolderList(ctx context.Context, path string) (*api.FolderListRes
 	}

 	return &response, nil
 }

 // deleteFolder deletes a folder at the specified path.
 func (f *Fs) deleteFolder(ctx context.Context, fullPath string) error {
 	fullPath = f.fromStandardPath(fullPath)
+	deleteURL := fmt.Sprintf("%s/folder/delete?folder_path=%s&key=%s",
+		f.endpoint,
+		url.QueryEscape(fullPath),
+		url.QueryEscape(f.opt.Key),
+	)

-	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/folder/delete",
-		Parameters: url.Values{
-			"folder_path": {fullPath},
-			"key":         {f.opt.Key},
-		},
-	}
-
 	delResp := api.DeleteFolderResponse{}

 	err := f.pacer.Call(func() (bool, error) {
-		_, err := f.srv.CallJSON(ctx, &opts, nil, &delResp)
+		req, err := http.NewRequestWithContext(ctx, "GET", deleteURL, nil)
+		if err != nil {
+			return false, err
+		}
+		resp, err := f.client.Do(req)
 		if err != nil {
 			return fserrors.ShouldRetry(err), err
 		}
+		defer func() {
+			if err := resp.Body.Close(); err != nil {
+				fs.Logf(nil, "Failed to close response body: %v", err)
+			}
+		}()
+
+		body, err := io.ReadAll(resp.Body)
+		if err != nil {
+			return false, err
+		}
+
+		if err := json.Unmarshal(body, &delResp); err != nil {
+			return false, fmt.Errorf("error decoding delete response: %w", err)
+		}
 		if delResp.Status != 200 {
 			return false, fmt.Errorf("delete error: %s", delResp.Msg)
 		}
@@ -178,27 +167,38 @@ func (f *Fs) deleteFolder(ctx context.Context, fullPath string) error {

 // getDirectLink of file from FileLu to download.
 func (f *Fs) getDirectLink(ctx context.Context, filePath string) (string, int64, error) {
 	filePath = f.fromStandardPath(filePath)
+	apiURL := fmt.Sprintf("%s/file/direct_link?file_path=%s&key=%s",
+		f.endpoint,
+		url.QueryEscape(filePath),
+		url.QueryEscape(f.opt.Key),
+	)

-	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/file/direct_link",
-		Parameters: url.Values{
-			"file_path": {filePath},
-			"key":       {f.opt.Key},
-		},
-	}
-
 	result := api.FileDirectLinkResponse{}

 	err := f.pacer.Call(func() (bool, error) {
-		_, err := f.srv.CallJSON(ctx, &opts, nil, &result)
+		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
+		if err != nil {
+			return false, fmt.Errorf("failed to create request: %w", err)
+		}
+
+		resp, err := f.client.Do(req)
 		if err != nil {
 			return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
 		}
+		defer func() {
+			if err := resp.Body.Close(); err != nil {
+				fs.Logf(nil, "Failed to close response body: %v", err)
+			}
+		}()
+
+		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+			return false, fmt.Errorf("error decoding response: %w", err)
+		}
+
 		if result.Status != 200 {
 			return false, fmt.Errorf("API error: %s", result.Msg)
 		}
-		return false, nil
+
+		return shouldRetryHTTP(resp.StatusCode), nil
 	})
 	if err != nil {
 		return "", 0, err
@@ -210,31 +210,39 @@ func (f *Fs) getDirectLink(ctx context.Context, filePath string) (string, int64,

 // deleteFile deletes a file based on filePath
 func (f *Fs) deleteFile(ctx context.Context, filePath string) error {
 	filePath = f.fromStandardPath(filePath)
+	apiURL := fmt.Sprintf("%s/file/remove?file_path=%s&key=%s",
+		f.endpoint,
+		url.QueryEscape(filePath),
+		url.QueryEscape(f.opt.Key),
+	)

-	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/file/remove",
-		Parameters: url.Values{
-			"file_path": {filePath},
-			"key":       {f.opt.Key},
-		},
-	}
-
 	result := api.DeleteFileResponse{}

 	err := f.pacer.Call(func() (bool, error) {
-		_, err := f.srv.CallJSON(ctx, &opts, nil, &result)
+		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
 		if err != nil {
-			return shouldRetry(err), fmt.Errorf("failed to delete file: %w", err)
+			return false, fmt.Errorf("failed to create request: %w", err)
+		}
+
+		resp, err := f.client.Do(req)
+		if err != nil {
+			return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
+		}
+		defer func() {
+			if err := resp.Body.Close(); err != nil {
+				fs.Logf(nil, "Failed to close response body: %v", err)
+			}
+		}()
+
+		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+			return false, fmt.Errorf("error decoding response: %w", err)
 		}

 		if result.Status != 200 {
 			return false, fmt.Errorf("API error: %s", result.Msg)
 		}

-		return false, nil
+		return shouldRetryHTTP(resp.StatusCode), nil
 	})

 	return err
 }
@@ -267,28 +275,46 @@ func (f *Fs) getAccountInfo(ctx context.Context) (*api.AccountInfoResponse, erro

 // getFileInfo retrieves file information based on file code
 func (f *Fs) getFileInfo(ctx context.Context, fileCode string) (*api.FileInfoResponse, error) {
+	u, _ := url.Parse(f.endpoint + "/file/info2")
+	q := u.Query()
+	q.Set("file_code", fileCode) // raw path — Go handles escaping properly here
+	q.Set("key", f.opt.Key)
+	u.RawQuery = q.Encode()

-	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/file/info2",
-		Parameters: url.Values{
-			"file_code": {fileCode},
-			"key":       {f.opt.Key},
-		},
-	}
+	apiURL := f.endpoint + "/file/info2?" + u.RawQuery

-	result := api.FileInfoResponse{}
-
+	var body []byte
 	err := f.pacer.Call(func() (bool, error) {
-		_, err := f.srv.CallJSON(ctx, &opts, nil, &result)
+		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
+		if err != nil {
+			return false, fmt.Errorf("failed to create request: %w", err)
+		}
+
+		resp, err := f.client.Do(req)
 		if err != nil {
 			return shouldRetry(err), fmt.Errorf("failed to fetch file info: %w", err)
 		}
-		return false, nil
+		defer func() {
+			if err := resp.Body.Close(); err != nil {
+				fs.Logf(nil, "Failed to close response body: %v", err)
+			}
+		}()
+
+		body, err = io.ReadAll(resp.Body)
+		if err != nil {
+			return false, fmt.Errorf("error reading response body: %w", err)
+		}
+
+		return shouldRetryHTTP(resp.StatusCode), nil
 	})
 	if err != nil {
 		return nil, err
 	}
+
+	result := api.FileInfoResponse{}
+	if err := json.NewDecoder(bytes.NewReader(body)).Decode(&result); err != nil {
+		return nil, fmt.Errorf("error decoding response: %w", err)
+	}
+
 	if result.Status != 200 || len(result.Result) == 0 {
 		return nil, fs.ErrorObjectNotFound
@@ -1,7 +1,6 @@
 package filelu

 import (
-	"bytes"
 	"context"
 	"encoding/json"
 	"errors"
@@ -14,108 +13,8 @@ import (
 	"strings"

 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/lib/rest"
 )

-// multipartUpload uploads a file in fixed-size chunks using the multipart API.
-func (f *Fs) multipartUpload(ctx context.Context, in io.Reader, remote string) error {
-	dir := path.Dir(remote)
-	if dir == "." {
-		dir = ""
-	}
-
-	if dir != "" {
-		_ = f.Mkdir(ctx, dir)
-	}
-
-	folder := strings.Trim(dir, "/")
-	if folder != "" {
-		folder = "/" + folder
-	}
-
-	file := path.Base(remote)
-
-	initResp, err := f.multipartInit(ctx, folder, file)
-	if err != nil {
-		return fmt.Errorf("multipart init failed: %w", err)
-	}
-
-	uploadID := initResp.Result.UploadID
-	sessID := initResp.Result.SessID
-	server := initResp.Result.Server
-	objectPath := initResp.Result.ObjectPath
-
-	chunkSize := int(f.opt.ChunkSize)
-	buf := make([]byte, 0, chunkSize)
-	tmp := make([]byte, 1024*1024)
-	partNo := 1
-
-	for {
-		n, errRead := in.Read(tmp)
-		if n > 0 {
-			buf = append(buf, tmp[:n]...)
-
-			// If buffer reached chunkSize, upload a full part
-			if len(buf) >= chunkSize {
-				err = f.uploadPart(ctx, server, uploadID, sessID, objectPath, partNo, bytes.NewReader(buf))
-				if err != nil {
-					return fmt.Errorf("upload part %d failed: %w", partNo, err)
-				}
-				partNo++
-				buf = buf[:0]
-			}
-		}
-
-		if errRead == io.EOF {
-			break
-		}
-		if errRead != nil {
-			return fmt.Errorf("read failed: %w", errRead)
-		}
-	}
-
-	if len(buf) > 0 {
-		err = f.uploadPart(ctx, server, uploadID, sessID, objectPath, partNo, bytes.NewReader(buf))
-		if err != nil {
-			return fmt.Errorf("upload part %d failed: %w", partNo, err)
-		}
-	}
-
-	err = f.completeMultipart(ctx, server, uploadID, sessID, objectPath)
-	if err != nil {
-		return fmt.Errorf("complete multipart failed: %w", err)
-	}
-
-	return nil
-}
-
-// uploadPart sends a single multipart chunk to the upload server.
-func (f *Fs) uploadPart(ctx context.Context, server, uploadID, sessID, objectPath string, partNo int, r io.Reader) error {
-	url := fmt.Sprintf("%s?partNumber=%d&uploadId=%s", server, partNo, uploadID)
-
-	req, err := http.NewRequestWithContext(ctx, "PUT", url, r)
-	if err != nil {
-		return err
-	}
-
-	req.Header.Set("X-RC-Upload-Id", uploadID)
-	req.Header.Set("X-RC-Part-No", fmt.Sprintf("%d", partNo))
-	req.Header.Set("X-Sess-ID", sessID)
-	req.Header.Set("X-Object-Path", objectPath)
-
-	resp, err := f.client.Do(req)
-	if err != nil {
-		return err
-	}
-	defer func() { _ = resp.Body.Close() }()
-
-	if resp.StatusCode != 200 {
-		return fmt.Errorf("uploadPart failed: %s", resp.Status)
-	}
-
-	return nil
-}
-
 // uploadFile uploads a file to FileLu
 func (f *Fs) uploadFile(ctx context.Context, fileContent io.Reader, fileFullPath string) error {
 	directory := path.Dir(fileFullPath)
@@ -169,15 +68,9 @@ func (f *Fs) uploadFile(ctx context.Context, fileContent io.Reader, fileFullPath
 	return nil
 }

-// getUploadServer gets the upload server URL with proper key authentication
 func (f *Fs) getUploadServer(ctx context.Context) (string, string, error) {
+	apiURL := fmt.Sprintf("%s/upload/server?key=%s", f.endpoint, url.QueryEscape(f.opt.Key))

-	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/upload/server",
-		Parameters: url.Values{
-			"key": {f.opt.Key},
-		},
-	}
-
 	var result struct {
 		Status int    `json:"status"`
@@ -187,21 +80,36 @@ func (f *Fs) getUploadServer(ctx context.Context) (string, string, error) {
 	}

 	err := f.pacer.Call(func() (bool, error) {
-		_, err := f.srv.CallJSON(ctx, &opts, nil, &result)
+		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
+		if err != nil {
+			return false, fmt.Errorf("failed to create request: %w", err)
+		}
+
+		resp, err := f.client.Do(req)
 		if err != nil {
 			return shouldRetry(err), fmt.Errorf("failed to get upload server: %w", err)
 		}
-		return false, nil
+		defer func() {
+			if err := resp.Body.Close(); err != nil {
+				fs.Logf(nil, "Failed to close response body: %v", err)
+			}
+		}()
+
+		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+			return false, fmt.Errorf("error decoding response: %w", err)
+		}
+
+		if result.Status != 200 {
+			return false, fmt.Errorf("API error: %s", result.Msg)
+		}
+
+		return shouldRetryHTTP(resp.StatusCode), nil
 	})
-
 	if err != nil {
 		return "", "", err
 	}

-	if result.Status != 200 {
-		return "", "", fmt.Errorf("API error: %s", result.Msg)
-	}
-
 	return result.Result, result.SessID, nil
 }
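Editor's note: the removed multipartUpload above grows a buffer with append until it reaches chunkSize. The standard-library way to cut a stream into fixed chunks is io.ReadFull into a reusable buffer; a hedged sketch follows, where uploadPart(ctx, partNo, data) is a hypothetical stand-in with a simplified signature, not the function that was deleted.

// Hedged sketch: chunking a stream with io.ReadFull instead of manual buffering.
buf := make([]byte, chunkSize)
for partNo := 1; ; partNo++ {
	n, err := io.ReadFull(in, buf)
	if n > 0 {
		// uploadPart is hypothetical; the real uploader takes more parameters.
		if uploadErr := uploadPart(ctx, partNo, buf[:n]); uploadErr != nil {
			return fmt.Errorf("upload part %d failed: %w", partNo, uploadErr)
		}
	}
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		break // final (possibly short) part has been uploaded
	}
	if err != nil {
		return fmt.Errorf("read failed: %w", err)
	}
}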
@@ -1,7 +1,9 @@
 package filelu

 import (
+	"bytes"
 	"context"
+	"encoding/json"
 	"fmt"
 	"io"
 	"net/http"
@@ -14,7 +16,6 @@ import (

 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/rest"
 )

 // Object describes a FileLu object
@@ -87,7 +88,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 	}
 }

-// Wrap the response body to handle offset and count
 var reader io.ReadCloser
 err = o.fs.pacer.Call(func() (bool, error) {
 	req, err := http.NewRequestWithContext(ctx, "GET", directLink, nil)
@@ -109,25 +109,22 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 		return false, fmt.Errorf("failed to download file: HTTP %d", resp.StatusCode)
 	}

-	if offset > 0 {
-		_, err = io.CopyN(io.Discard, resp.Body, offset)
-		if err != nil {
-			_ = resp.Body.Close()
-			return false, fmt.Errorf("failed to skip offset: %w", err)
-		}
+	// Wrap the response body to handle offset and count
+	currentContents, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return false, fmt.Errorf("failed to read response body: %w", err)
 	}

-	if count > 0 {
-		reader = struct {
-			io.Reader
-			io.Closer
-		}{
-			Reader: io.LimitReader(resp.Body, count),
-			Closer: resp.Body,
-		}
-	} else {
-		reader = resp.Body
+	if offset > 0 {
+		if offset > int64(len(currentContents)) {
+			return false, fmt.Errorf("offset %d exceeds file size %d", offset, len(currentContents))
+		}
+		currentContents = currentContents[offset:]
 	}
+	if count > 0 && count < int64(len(currentContents)) {
+		currentContents = currentContents[:count]
+	}
+	reader = io.NopCloser(bytes.NewReader(currentContents))

 	return false, nil
 })
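Editor's note on the Open hunk above: the left-hand version streams — it discards `offset` bytes with io.CopyN and caps the stream with io.LimitReader — while the right-hand version buffers the entire object in memory before slicing it. For large objects the streaming shape is the safer default; a condensed sketch of it, under the assumption that resp.Body is the ranged download:

// Hedged sketch of the streaming shape for serving a ranged read.
if offset > 0 {
	if _, err := io.CopyN(io.Discard, resp.Body, offset); err != nil {
		_ = resp.Body.Close()
		return nil, fmt.Errorf("failed to skip offset: %w", err)
	}
}
var rc io.ReadCloser = resp.Body
if count > 0 {
	// Limit reads to count bytes but keep Close targeting the real body.
	rc = struct {
		io.Reader
		io.Closer
	}{io.LimitReader(resp.Body, count), resp.Body}
}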
@@ -140,23 +137,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
|
|||||||
|
|
||||||
// Update updates the object with new data
|
// Update updates the object with new data
|
||||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||||
size := src.Size()
|
if src.Size() <= 0 {
|
||||||
|
return fs.ErrorCantUploadEmptyFiles
|
||||||
if size <= int64(o.fs.opt.UploadCutoff) {
|
|
||||||
err := o.fs.uploadFile(ctx, in, o.remote)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fullPath := path.Join(o.fs.root, o.remote)
|
|
||||||
err := o.fs.multipartUpload(ctx, in, fullPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to upload file: %w", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
o.size = size
|
err := o.fs.uploadFile(ctx, in, o.remote)
|
||||||
o.modTime = src.ModTime(ctx)
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to upload file: %w", err)
|
||||||
|
}
|
||||||
|
o.size = src.Size()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -194,14 +183,8 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 		return "", fmt.Errorf("no valid file code found in the remote path")
 	}
 
-	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/file/info",
-		Parameters: url.Values{
-			"file_code": {fileCode},
-			"key":       {o.fs.opt.Key},
-		},
-	}
+	apiURL := fmt.Sprintf("%s/file/info?file_code=%s&key=%s",
+		o.fs.endpoint, url.QueryEscape(fileCode), url.QueryEscape(o.fs.opt.Key))
 
 	var result struct {
 		Status int `json:"status"`
@@ -210,18 +193,29 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 			Hash string `json:"hash"`
 		} `json:"result"`
 	}
 
 	err := o.fs.pacer.Call(func() (bool, error) {
-		_, err := o.fs.srv.CallJSON(ctx, &opts, nil, &result)
+		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
+		if err != nil {
+			return false, err
+		}
+		resp, err := o.fs.client.Do(req)
 		if err != nil {
 			return shouldRetry(err), err
 		}
-		return false, nil
+		defer func() {
+			if err := resp.Body.Close(); err != nil {
+				fs.Logf(nil, "Failed to close response body: %v", err)
+			}
+		}()
+
+		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+			return false, err
+		}
+		return shouldRetryHTTP(resp.StatusCode), nil
 	})
 	if err != nil {
 		return "", err
 	}
 
 	if result.Status != 200 || len(result.Result) == 0 {
 		return "", fmt.Errorf("error: unable to fetch hash: %s", result.Msg)
 	}
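Aside: the rewritten retry closure above relies on a `shouldRetryHTTP` helper that this hunk does not show. A minimal sketch of what such a helper plausibly looks like, assuming the usual convention of retrying on throttling and transient server errors — the exact policy in the backend is not confirmed by this diff:

```go
package main

import "fmt"

// Hypothetical sketch of a retry classifier for fs/pacer.Call: returning
// true asks the pacer to back off and try the call again.
func shouldRetryHTTP(statusCode int) bool {
	switch statusCode {
	case 429, 500, 502, 503, 504: // throttling and transient server errors
		return true
	}
	return false
}

func main() {
	fmt.Println(shouldRetryHTTP(429)) // true
	fmt.Println(shouldRetryHTTP(404)) // false
}
```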
File diff suppressed because it is too large
@@ -1,14 +0,0 @@
-package filen
-
-import (
-	"testing"
-
-	"github.com/rclone/rclone/fstest/fstests"
-)
-
-func TestIntegration(t *testing.T) {
-	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestFilen:",
-		NilObject:  (*Object)(nil),
-	})
-}
@@ -204,12 +204,6 @@ Example:
 Help: `URL for HTTP CONNECT proxy
 
 Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
 
-Supports the format http://user:pass@host:port, http://host:port, http://host.
-
-Example:
-
-    http://myUser:myPass@proxyhostname.example.com:8000
-
 `,
 Advanced: true,
 }, {
@@ -898,7 +892,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 
 	resultchan := make(chan []*ftp.Entry, 1)
 	errchan := make(chan error, 1)
-	go func(c *ftp.ServerConn) {
+	go func() {
 		result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir)))
 		f.putFtpConnection(&c, err)
 		if err != nil {
@@ -906,7 +900,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			return
 		}
 		resultchan <- result
-	}(c)
+	}()
 
 	// Wait for List for up to Timeout seconds
 	timer := time.NewTimer(f.ci.TimeoutOrInfinite())
@@ -116,8 +116,8 @@ type Date struct {
 type DateFilter struct {
 	Dates  []Date `json:"dates,omitempty"`
 	Ranges []struct {
-		StartDate Date `json:"startDate"`
-		EndDate   Date `json:"endDate"`
+		StartDate Date `json:"startDate,omitempty"`
+		EndDate   Date `json:"endDate,omitempty"`
 	} `json:"ranges,omitempty"`
 }
 
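Aside: this hunk and several later ones only toggle `omitempty` on API struct tags. Worth noting that with `encoding/json` (v1), `omitempty` drops only empty strings, zero numbers, `false`, `nil` pointers/interfaces, and empty slices/maps; a zero struct value such as a `Date` or `time.Time` is still marshalled, so some of these tags are inert. A self-contained illustration (the `Payload` type is invented for the example):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Payload is invented for illustration only.
type Payload struct {
	Name  string    `json:"name,omitempty"`  // omitted when ""
	Count int       `json:"count,omitempty"` // omitted when 0
	When  time.Time `json:"when,omitempty"`  // NOT omitted: a zero struct is not "empty"
}

func main() {
	out, _ := json.Marshal(Payload{})
	fmt.Println(string(out)) // {"when":"0001-01-01T00:00:00Z"}
}
```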
@@ -583,7 +583,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		entriesMu.Unlock()
 	}
 	for range checkers {
-		wg.Go(func() {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
 			for remote := range in {
 				file := &Object{
 					fs: f,
@@ -599,7 +601,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 					fs.Debugf(remote, "skipping because of error: %v", err)
 				}
 			}
-		})
+		}()
 	}
 	for _, name := range names {
 		isDir := name[len(name)-1] == '/'
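Aside: this hunk (and the TestRemove one further down) rewrites `sync.WaitGroup.Go`, added in Go 1.25, back to the classic `Add`/`go`/`Done` pattern so the branch still builds on Go 1.24. The two forms are equivalent; a minimal runnable sketch (the `work` helper is invented for illustration):

```go
package main

import (
	"fmt"
	"sync"
)

// work is invented for the example.
func work(i int) { fmt.Println("worker", i) }

func main() {
	var wg sync.WaitGroup
	for i := range 3 {
		// Go 1.25+ one-liner: wg.Go(func() { work(i) })
		// Equivalent spelling that also builds on Go 1.24 and earlier:
		wg.Add(1)
		go func() {
			defer wg.Done()
			work(i)
		}()
	}
	wg.Wait()
}
```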
@@ -599,7 +599,7 @@ type UpdateFileInfo struct {
 		Signature   string `json:"signature,omitempty"`
 		Size        int64  `json:"size,omitempty"`
 		WrappingKey string `json:"wrapping_key,omitempty"`
-	} `json:"data"`
+	} `json:"data,omitempty"`
 	DocumentID string    `json:"document_id"`
 	FileFlags  FileFlags `json:"file_flags"`
 	Mtime      int64     `json:"mtime"`
@@ -849,10 +849,10 @@ type DriveItem struct {
 	NumberOfItems int64     `json:"numberOfItems"`
 	Status        string    `json:"status"`
 	Extension     string    `json:"extension,omitempty"`
-	DateModified  time.Time `json:"dateModified"`
-	DateChanged   time.Time `json:"dateChanged"`
+	DateModified  time.Time `json:"dateModified,omitempty"`
+	DateChanged   time.Time `json:"dateChanged,omitempty"`
 	Size          int64     `json:"size,omitempty"`
-	LastOpenTime  time.Time `json:"lastOpenTime"`
+	LastOpenTime  time.Time `json:"lastOpenTime,omitempty"`
 	Urls          struct {
 		URLDownload string `json:"url_download"`
 	} `json:"urls"`
@@ -72,7 +72,7 @@ func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadPara
 
 	response := &UploadResult{}
 
-	formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName, "application/octet-stream")
+	formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName)
 
 	if err != nil {
 		return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err)
@@ -1,246 +0,0 @@
-// Package internxt provides authentication handling
-package internxt
-
-import (
-	"context"
-	"crypto/sha256"
-	"encoding/base64"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"time"
-
-	"github.com/golang-jwt/jwt/v5"
-	internxtauth "github.com/internxt/rclone-adapter/auth"
-	internxtconfig "github.com/internxt/rclone-adapter/config"
-	sdkerrors "github.com/internxt/rclone-adapter/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/lib/oauthutil"
-	"golang.org/x/oauth2"
-)
-
-type userInfo struct {
-	RootFolderID string
-	Bucket       string
-	BridgeUser   string
-	UserID       string
-}
-
-type userInfoConfig struct {
-	Token string
-}
-
-// getUserInfo fetches user metadata from the refresh endpoint
-func getUserInfo(ctx context.Context, cfg *userInfoConfig) (*userInfo, error) {
-	// Call the refresh endpoint to get all user metadata
-	refreshCfg := internxtconfig.NewDefaultToken(cfg.Token)
-	resp, err := internxtauth.RefreshToken(ctx, refreshCfg)
-	if err != nil {
-		return nil, fmt.Errorf("failed to fetch user info: %w", err)
-	}
-
-	if resp.User.Bucket == "" {
-		return nil, errors.New("API response missing user.bucket")
-	}
-	if resp.User.RootFolderID == "" {
-		return nil, errors.New("API response missing user.rootFolderId")
-	}
-	if resp.User.BridgeUser == "" {
-		return nil, errors.New("API response missing user.bridgeUser")
-	}
-	if resp.User.UserID == "" {
-		return nil, errors.New("API response missing user.userId")
-	}
-
-	info := &userInfo{
-		RootFolderID: resp.User.RootFolderID,
-		Bucket:       resp.User.Bucket,
-		BridgeUser:   resp.User.BridgeUser,
-		UserID:       resp.User.UserID,
-	}
-
-	fs.Debugf(nil, "User info: rootFolderId=%s, bucket=%s",
-		info.RootFolderID, info.Bucket)
-
-	return info, nil
-}
-
-// parseJWTExpiry extracts the expiry time from a JWT token string
-func parseJWTExpiry(tokenString string) (time.Time, error) {
-	parser := jwt.NewParser(jwt.WithoutClaimsValidation())
-	token, _, err := parser.ParseUnverified(tokenString, jwt.MapClaims{})
-	if err != nil {
-		return time.Time{}, fmt.Errorf("failed to parse token: %w", err)
-	}
-
-	claims, ok := token.Claims.(jwt.MapClaims)
-	if !ok {
-		return time.Time{}, errors.New("invalid token claims")
-	}
-
-	exp, ok := claims["exp"].(float64)
-	if !ok {
-		return time.Time{}, errors.New("token missing expiration")
-	}
-
-	return time.Unix(int64(exp), 0), nil
-}
-
-// jwtToOAuth2Token converts a JWT string to an oauth2.Token with expiry
-func jwtToOAuth2Token(jwtString string) (*oauth2.Token, error) {
-	expiry, err := parseJWTExpiry(jwtString)
-	if err != nil {
-		return nil, err
-	}
-
-	return &oauth2.Token{
-		AccessToken: jwtString,
-		TokenType:   "Bearer",
-		Expiry:      expiry,
-	}, nil
-}
-
-// computeBasicAuthHeader creates the BasicAuthHeader for bucket operations
-func computeBasicAuthHeader(bridgeUser, userID string) string {
-	sum := sha256.Sum256([]byte(userID))
-	hexPass := hex.EncodeToString(sum[:])
-	creds := fmt.Sprintf("%s:%s", bridgeUser, hexPass)
-	return "Basic " + base64.StdEncoding.EncodeToString([]byte(creds))
-}
-
-// refreshJWTToken refreshes the token using Internxt's refresh endpoint
-func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error {
-	currentToken, err := oauthutil.GetToken(name, m)
-	if err != nil {
-		return fmt.Errorf("failed to get current token: %w", err)
-	}
-
-	cfg := internxtconfig.NewDefaultToken(currentToken.AccessToken)
-	resp, err := internxtauth.RefreshToken(ctx, cfg)
-	if err != nil {
-		return fmt.Errorf("refresh request failed: %w", err)
-	}
-
-	if resp.NewToken == "" {
-		return errors.New("refresh response missing newToken")
-	}
-
-	// Convert JWT to oauth2.Token format
-	token, err := jwtToOAuth2Token(resp.NewToken)
-	if err != nil {
-		return fmt.Errorf("failed to parse refreshed token: %w", err)
-	}
-
-	err = oauthutil.PutToken(name, m, token, false)
-	if err != nil {
-		return fmt.Errorf("failed to save token: %w", err)
-	}
-
-	if resp.User.Bucket != "" {
-		m.Set("bucket", resp.User.Bucket)
-	}
-
-	fs.Debugf(name, "Token refreshed successfully, new expiry: %v", token.Expiry)
-	return nil
-}
-
-// reLogin performs a full re-login using stored email+password credentials.
-// Returns the AccessResponse on success, or an error if 2FA is required or login fails.
-func (f *Fs) reLogin(ctx context.Context) (*internxtauth.AccessResponse, error) {
-	password, err := obscure.Reveal(f.opt.Pass)
-	if err != nil {
-		return nil, fmt.Errorf("couldn't decrypt password: %w", err)
-	}
-
-	cfg := internxtconfig.NewDefaultToken("")
-	cfg.HTTPClient = fshttp.NewClient(ctx)
-
-	loginResp, err := internxtauth.Login(ctx, cfg, f.opt.Email)
-	if err != nil {
-		return nil, fmt.Errorf("re-login check failed: %w", err)
-	}
-
-	if loginResp.TFA {
-		return nil, errors.New("account requires 2FA - please run: rclone config reconnect " + f.name + ":")
-	}
-
-	resp, err := internxtauth.DoLogin(ctx, cfg, f.opt.Email, password, "")
-	if err != nil {
-		return nil, fmt.Errorf("re-login failed: %w", err)
-	}
-
-	return resp, nil
-}
-
-// refreshOrReLogin tries to refresh the JWT token first; if that fails with 401,
-// it falls back to a full re-login using stored credentials.
-func (f *Fs) refreshOrReLogin(ctx context.Context) error {
-	refreshErr := refreshJWTToken(ctx, f.name, f.m)
-	if refreshErr == nil {
-		newToken, err := oauthutil.GetToken(f.name, f.m)
-		if err != nil {
-			return fmt.Errorf("failed to get refreshed token: %w", err)
-		}
-		f.cfg.Token = newToken.AccessToken
-		f.cfg.BasicAuthHeader = computeBasicAuthHeader(f.bridgeUser, f.userID)
-		fs.Debugf(f, "Token refresh succeeded")
-		return nil
-	}
-
-	var httpErr *sdkerrors.HTTPError
-	if !errors.As(refreshErr, &httpErr) || httpErr.StatusCode() != 401 {
-		if fserrors.ShouldRetry(refreshErr) {
-			return refreshErr
-		}
-		return refreshErr
-	}
-
-	fs.Debugf(f, "Token refresh returned 401, attempting re-login with stored credentials")
-
-	resp, err := f.reLogin(ctx)
-	if err != nil {
-		return fmt.Errorf("re-login fallback failed: %w", err)
-	}
-
-	oauthToken, err := jwtToOAuth2Token(resp.NewToken)
-	if err != nil {
-		return fmt.Errorf("failed to parse re-login token: %w", err)
-	}
-	err = oauthutil.PutToken(f.name, f.m, oauthToken, true)
-	if err != nil {
-		return fmt.Errorf("failed to save re-login token: %w", err)
-	}
-
-	f.cfg.Token = oauthToken.AccessToken
-	f.bridgeUser = resp.User.BridgeUser
-	f.userID = resp.User.UserID
-	f.cfg.BasicAuthHeader = computeBasicAuthHeader(f.bridgeUser, f.userID)
-	f.cfg.Bucket = resp.User.Bucket
-	f.cfg.RootFolderID = resp.User.RootFolderID
-
-	fs.Debugf(f, "Re-login succeeded, new token expiry: %v", oauthToken.Expiry)
-	return nil
-}
-
-// reAuthorize is called after getting 401 from the server.
-// It serializes re-auth attempts and uses a circuit-breaker to avoid infinite loops.
-func (f *Fs) reAuthorize(ctx context.Context) error {
-	f.authMu.Lock()
-	defer f.authMu.Unlock()
-
-	if f.authFailed {
-		return errors.New("re-authorization permanently failed")
-	}
-
-	err := f.refreshOrReLogin(ctx)
-	if err != nil {
-		f.authFailed = true
-		return err
-	}
-
-	return nil
-}
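Aside: the deleted `parseJWTExpiry`/`jwtToOAuth2Token` helpers above can be exercised standalone. A sketch that mints a throwaway HS256 token with golang-jwt and reads its expiry back the same way (the secret and claims are invented for the example):

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	// Mint a throwaway token expiring in one hour (example values only).
	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"exp": time.Now().Add(time.Hour).Unix(),
	})
	signed, err := tok.SignedString([]byte("example-secret"))
	if err != nil {
		panic(err)
	}

	// Read the expiry back without verifying the signature, as
	// parseJWTExpiry above does.
	parser := jwt.NewParser(jwt.WithoutClaimsValidation())
	parsed, _, err := parser.ParseUnverified(signed, jwt.MapClaims{})
	if err != nil {
		panic(err)
	}
	exp, _ := parsed.Claims.(jwt.MapClaims)["exp"].(float64)
	fmt.Println("expires:", time.Unix(int64(exp), 0))
}
```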
File diff suppressed because it is too large
@@ -1,14 +0,0 @@
-package internxt_test
-
-import (
-	"testing"
-
-	"github.com/rclone/rclone/fstest/fstests"
-)
-
-// TestIntegration runs integration tests against the remote
-func TestIntegration(t *testing.T) {
-	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestInternxt:",
-	})
-}
@@ -33,10 +33,12 @@ func TestRemove(t *testing.T) {
 	assert.True(t, exists())
 	// close the file in the background
 	var wg sync.WaitGroup
-	wg.Go(func() {
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
 		time.Sleep(250 * time.Millisecond)
 		require.NoError(t, fd.Close())
-	})
+	}()
 	// delete the open file
 	err = remove(name)
 	require.NoError(t, err)
@@ -14,7 +14,7 @@ import (
 func remove(name string) (err error) {
 	const maxTries = 10
 	var sleepTime = 1 * time.Millisecond
-	for i := range maxTries {
+	for i := 0; i < maxTries; i++ {
 		err = os.Remove(name)
 		if err == nil {
 			break
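Aside: the loop header being rewritten here is Go 1.22's range-over-int; both forms count `i` from 0 to `maxTries-1`. A minimal sketch of the equivalence:

```go
package main

import "fmt"

func main() {
	const maxTries = 3

	// Go 1.22+: range over an integer counts from 0 to maxTries-1.
	for i := range maxTries {
		fmt.Println("attempt", i)
	}

	// Identical behaviour with the classic three-clause form:
	for i := 0; i < maxTries; i++ {
		fmt.Println("attempt", i)
	}
}
```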
@@ -112,7 +112,7 @@ type ListItem struct {
 	Count struct {
 		Folders int `json:"folders"`
 		Files   int `json:"files"`
-	} `json:"count"`
+	} `json:"count,omitempty"`
 	Kind string `json:"kind"`
 	Type string `json:"type"`
 	Name string `json:"name"`
@@ -154,7 +154,7 @@ type FolderInfoResponse struct {
 		Type string     `json:"type"`
 		Home string     `json:"home"`
 		List []ListItem `json:"list"`
-	} `json:"body"`
+	} `json:"body,omitempty"`
 	Time   int64  `json:"time"`
 	Status int    `json:"status"`
 	Email  string `json:"email"`
@@ -17,10 +17,12 @@ Improvements:
 
 import (
 	"context"
+	"crypto/tls"
 	"encoding/base64"
 	"errors"
 	"fmt"
 	"io"
+	"net/http"
 	"path"
 	"slices"
 	"strings"
@@ -254,7 +256,25 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	defer megaCacheMu.Unlock()
 	srv := megaCache[opt.User]
 	if srv == nil {
-		srv = mega.New().SetClient(fshttp.NewClient(ctx))
+		// srv = mega.New().SetClient(fshttp.NewClient(ctx))
+
+		// Workaround for Mega's use of insecure cipher suites which are no longer supported by default since Go 1.22.
+		// Relevant issues:
+		// https://github.com/rclone/rclone/issues/8565
+		// https://github.com/meganz/webclient/issues/103
+		clt := fshttp.NewClient(ctx)
+		clt.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
+			var ids []uint16
+			// Read default ciphers
+			for _, cs := range tls.CipherSuites() {
+				ids = append(ids, cs.ID)
+			}
+			// Insecure but Mega uses TLS_RSA_WITH_AES_128_GCM_SHA256 for storage endpoints
+			// (e.g. https://gfs302n114.userstorage.mega.co.nz) as of June 18, 2025.
+			t.TLSClientConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
+		})
+		srv = mega.New().SetClient(clt)
+
 		srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
 		srv.SetHTTPS(opt.UseHTTPS)
 		srv.SetLogger(func(format string, v ...any) {
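Aside: the workaround above re-enables a cipher suite that recent Go toolchains dropped from the defaults. When in doubt which list a suite lives in, `crypto/tls` exposes both the supported and the insecure sets; a short sketch that reports where TLS_RSA_WITH_AES_128_GCM_SHA256 lands (on Go 1.22+ it is expected in the insecure list, though this depends on the toolchain version):

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// find reports whether a cipher suite ID appears in a suite list.
func find(suites []*tls.CipherSuite, id uint16) bool {
	for _, cs := range suites {
		if cs.ID == id {
			return true
		}
	}
	return false
}

func main() {
	id := uint16(tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
	fmt.Println("in defaults:", find(tls.CipherSuites(), id))         // expected false on Go 1.22+
	fmt.Println("in insecure:", find(tls.InsecureCipherSuites(), id)) // expected true on Go 1.22+
}
```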
@@ -50,12 +50,12 @@ type Identity struct {
 // to represent a set of identities associated with various events for
 // an item, such as created by or last modified by.
 type IdentitySet struct {
-	User        Identity `json:"user"`
-	Application Identity `json:"application"`
-	Device      Identity `json:"device"`
-	Group       Identity `json:"group"`
-	SiteGroup   Identity `json:"siteGroup"` // The SharePoint group associated with this action. Optional.
-	SiteUser    Identity `json:"siteUser"`  // The SharePoint user associated with this action. Optional.
+	User        Identity `json:"user,omitempty"`
+	Application Identity `json:"application,omitempty"`
+	Device      Identity `json:"device,omitempty"`
+	Group       Identity `json:"group,omitempty"`
+	SiteGroup   Identity `json:"siteGroup,omitempty"` // The SharePoint group associated with this action. Optional.
+	SiteUser    Identity `json:"siteUser,omitempty"`  // The SharePoint user associated with this action. Optional.
 }
 
 // Quota groups storage space quota-related information on OneDrive into a single structure.
@@ -133,7 +133,7 @@ type RemoteItemFacet struct {
 
 // FolderFacet groups folder-related data on OneDrive into a single structure
 type FolderFacet struct {
-	ChildCount int64 `json:"childCount,omitempty"` // Number of children contained immediately within this container.
+	ChildCount int64 `json:"childCount"` // Number of children contained immediately within this container.
 }
 
 // HashesType groups different types of hashes into a single structure, for an item on OneDrive.
@@ -155,8 +155,8 @@ type FileFacet struct {
 // facet can be used to specify the last modified date or created date
 // of the item as it was on the local device.
 type FileSystemInfoFacet struct {
-	CreatedDateTime      Timestamp `json:"createdDateTime"`      // The UTC date and time the file was created on a client.
-	LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // The UTC date and time the file was last modified on a client.
+	CreatedDateTime      Timestamp `json:"createdDateTime,omitempty"`      // The UTC date and time the file was created on a client.
+	LastModifiedDateTime Timestamp `json:"lastModifiedDateTime,omitempty"` // The UTC date and time the file was last modified on a client.
 }
 
 // DeletedFacet indicates that the item on OneDrive has been
@@ -175,10 +175,10 @@ type PackageFacet struct {
 // SharedType indicates a DriveItem has been shared with others. The resource includes information about how the item is shared.
 // If a Driveitem has a non-null shared facet, the item has been shared.
 type SharedType struct {
-	Owner          IdentitySet `json:"owner"` // The identity of the owner of the shared item. Read-only.
+	Owner          IdentitySet `json:"owner,omitempty"` // The identity of the owner of the shared item. Read-only.
 	Scope          string      `json:"scope,omitempty"` // Indicates the scope of how the item is shared: anonymous, organization, or users. Read-only.
-	SharedBy       IdentitySet `json:"sharedBy"` // The identity of the user who shared the item. Read-only.
-	SharedDateTime Timestamp   `json:"sharedDateTime"` // The UTC date and time when the item was shared. Read-only.
+	SharedBy       IdentitySet `json:"sharedBy,omitempty"` // The identity of the user who shared the item. Read-only.
+	SharedDateTime Timestamp   `json:"sharedDateTime,omitempty"` // The UTC date and time when the item was shared. Read-only.
 }
 
 // SharingInvitationType groups invitation-related data items into a single structure.
@@ -60,7 +60,7 @@ var systemMetadataInfo = map[string]fs.MetadataHelp{
 		ReadOnly: true,
 	},
 	"description": {
-		Help:    "A short description of the file. Max 1024 characters. No longer supported by Microsoft.",
+		Help:    "A short description of the file. Max 1024 characters. Only supported for OneDrive Personal.",
 		Type:    "string",
 		Example: "Contract for signing",
 	},
@@ -259,8 +259,12 @@ func (m *Metadata) Set(ctx context.Context, metadata fs.Metadata) (numSet int, e
 			m.btime = t
 			numSet++
 		case "description":
-			fs.Debugf(m.remote, "metadata description is no longer supported -- skipping: %s", v)
-			continue
+			if m.fs.driveType != driveTypePersonal {
+				fs.Debugf(m.remote, "metadata description is only supported for OneDrive Personal -- skipping: %s", v)
+				continue
+			}
+			m.description = v
+			numSet++
 		case "permissions":
 			if !m.fs.opt.MetadataPermissions.IsSet(rwWrite) {
 				continue
@@ -288,6 +292,9 @@ func (m *Metadata) toAPIMetadata() api.Metadata {
 	update := api.Metadata{
 		FileSystemInfo: &api.FileSystemInfoFacet{},
 	}
+	if m.description != "" && m.fs.driveType == driveTypePersonal {
+		update.Description = m.description
+	}
 	if !m.mtime.IsZero() {
 		update.FileSystemInfo.LastModifiedDateTime = api.Timestamp(m.mtime)
 	}
@@ -596,10 +603,12 @@ func (m *Metadata) addPermission(ctx context.Context, p *api.PermissionsType) (n
 
 	req := &api.AddPermissionsRequest{
 		Recipients:    fillRecipients(p, m.fs.driveType),
-		RequireSignIn: true,
+		RequireSignIn: m.fs.driveType != driveTypePersonal, // personal and business have conflicting requirements
 		Roles:         p.Roles,
 	}
-	req.RetainInheritedPermissions = false
+	if m.fs.driveType != driveTypePersonal {
+		req.RetainInheritedPermissions = false // not supported for personal
+	}
 
 	if p.Link != nil && p.Link.Scope == api.AnonymousScope {
 		link, err := m.fs.PublicLink(ctx, m.remote, fs.DurationOff, false)
@@ -807,13 +816,15 @@ func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata
 		if err != nil {
 			return nil, err
 		}
-		_, meta, err = f.createDir(ctx, parentID, dir, leaf, metadata)
+		info, meta, err = f.createDir(ctx, parentID, dir, leaf, metadata)
 		if err != nil {
 			return nil, err
 		}
-		// for some reason, OneDrive Business and Personal needs this extra step to set modtime. Seems like a bug...
-		fs.Debugf(dir, "setting time %v", meta.mtime)
-		info, err = meta.Write(ctx, false)
+		if f.driveType != driveTypePersonal {
+			// for some reason, OneDrive Business needs this extra step to set modtime, while Personal does not. Seems like a bug...
+			fs.Debugf(dir, "setting time %v", meta.mtime)
+			info, err = meta.Write(ctx, false)
+		}
 	} else if err == nil {
 		// Directory exists and needs updating
 		info, meta, err = f.updateDir(ctx, dirID, dir, metadata)
@@ -403,7 +403,7 @@ This is why this flag is not set as the default.
 
 As a rule of thumb if nearly all of your data is under rclone's root
 directory (the |root/directory| in |onedrive:root/directory|) then
-using this flag will be a big performance win. If your data is
+using this flag will be be a big performance win. If your data is
 mostly not under the root then using this flag will be a big
 performance loss.
 
@@ -2554,7 +2554,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
 	}
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.unAuth.Call(ctx, &opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		return shouldRetry(ctx, resp, err)
 	})
 	return
@@ -136,6 +136,11 @@ func (f *Fs) TestReadPermissions(t *testing.T, r *fstest.Run) {
 	_, expectedMeta := f.putWithMeta(ctx, t, &file1, []*api.PermissionsType{}) // return var intentionally switched here
 	permissions := defaultPermissions(f.driveType)
 	_, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)
+	if f.driveType == driveTypePersonal {
+		perms, ok := actualMeta["permissions"]
+		assert.False(t, ok, fmt.Sprintf("permissions metadata key was unexpectedly found: %v", perms))
+		return
+	}
 	assert.JSONEq(t, expectedMeta["permissions"], actualMeta["permissions"])
 }
 
@@ -154,7 +159,7 @@ func (f *Fs) TestReadMetadata(t *testing.T, r *fstest.Run) {
 		if slices.Contains(optionals, k) {
 			continue
 		}
-		if k == "description" {
+		if k == "description" && f.driveType != driveTypePersonal {
 			continue // not supported
 		}
 		gotV, ok := actualMeta[k]
@@ -191,7 +196,7 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
 		if slices.Contains(optionals, k) {
 			continue
 		}
-		if k == "description" {
+		if k == "description" && f.driveType != driveTypePersonal {
 			continue // not supported
 		}
 		gotV, ok := actualMeta[k]
@@ -412,7 +417,9 @@ func (f *Fs) compareMeta(t *testing.T, expectedMeta, actualMeta fs.Metadata, ign
 			compareTimeStrings(t, k, v, gotV, time.Second)
 			continue
 		case "description":
-			continue // not supported
+			if f.driveType != driveTypePersonal {
+				continue // not supported
+			}
 		}
 		assert.True(t, ok, fmt.Sprintf("expected metadata key is missing: %v", k))
 		assert.Equal(t, v, gotV, actualMeta)
@@ -60,6 +60,9 @@ type StateChangeConf struct {
 func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (any, error) {
 	// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)
 
+	notfoundTick := 0
+	targetOccurrence := 0
+
 	// Set a default for times to check for not found
 	if conf.NotFoundChecks == 0 {
 		conf.NotFoundChecks = 20
@@ -81,11 +84,9 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
 	// cancellation channel for the refresh loop
 	cancelCh := make(chan struct{})
 
-	go func() {
-		notfoundTick := 0
-		targetOccurrence := 0
-		result := Result{}
+	result := Result{}
 
+	go func() {
 		defer close(resCh)
 
 		select {
@@ -1459,7 +1459,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// opts.Body=0), so upload it as a multipart form POST with
 	// Content-Length set.
 	if size == 0 {
-		formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf, opts.ContentType)
+		formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
 		if err != nil {
 			return fmt.Errorf("failed to make multipart upload for 0 length file: %w", err)
 		}
@@ -75,8 +75,8 @@ type ErrorDetails struct {
 	Type         string   `json:"@type,omitempty"`
 	Reason       string   `json:"reason,omitempty"`
 	Domain       string   `json:"domain,omitempty"`
-	Metadata     struct{} `json:"metadata"` // TODO: undiscovered yet
+	Metadata     struct{} `json:"metadata,omitempty"` // TODO: undiscovered yet
 	Locale       string   `json:"locale,omitempty"` // e.g. "en"
 	Message      string   `json:"message,omitempty"`
 	StackEntries []any    `json:"stack_entries,omitempty"` // TODO: undiscovered yet
 	Detail       string   `json:"detail,omitempty"`
@@ -189,8 +189,8 @@ type File struct {
 	Apps              []*FileApp `json:"apps,omitempty"`
 	Audit             *FileAudit `json:"audit,omitempty"`
 	Collection        string     `json:"collection,omitempty"` // TODO
-	CreatedTime       Time       `json:"created_time"`
-	DeleteTime        Time       `json:"delete_time"`
+	CreatedTime       Time       `json:"created_time,omitempty"`
+	DeleteTime        Time       `json:"delete_time,omitempty"`
 	FileCategory      string     `json:"file_category,omitempty"` // "AUDIO", "VIDEO"
 	FileExtension     string     `json:"file_extension,omitempty"`
 	FolderType        string     `json:"folder_type,omitempty"`
@@ -202,7 +202,7 @@ type File struct {
 	Md5Checksum       string     `json:"md5_checksum,omitempty"`
 	Medias            []*Media   `json:"medias,omitempty"`
 	MimeType          string     `json:"mime_type,omitempty"`
-	ModifiedTime      Time       `json:"modified_time"` // updated when renamed or moved
+	ModifiedTime      Time       `json:"modified_time,omitempty"` // updated when renamed or moved
 	Name              string     `json:"name,omitempty"`
 	OriginalFileIndex int        `json:"original_file_index,omitempty"` // TODO
 	OriginalURL       string     `json:"original_url,omitempty"`
@@ -221,7 +221,7 @@ type File struct {
 	ThumbnailLink     string     `json:"thumbnail_link,omitempty"`
 	Trashed           bool       `json:"trashed,omitempty"`
 	UserID            string     `json:"user_id,omitempty"`
-	UserModifiedTime  Time       `json:"user_modified_time"`
+	UserModifiedTime  Time       `json:"user_modified_time,omitempty"`
 	WebContentLink    string     `json:"web_content_link,omitempty"`
 	Writable          bool       `json:"writable,omitempty"`
 }
@@ -252,7 +252,7 @@ type Media struct {
 		AudioCodec string `json:"audio_codec,omitempty"` // "pcm_bluray", "aac"
 		VideoType  string `json:"video_type,omitempty"` // "mpegts"
 		HdrType    string `json:"hdr_type,omitempty"`
-	} `json:"video"`
+	} `json:"video,omitempty"`
 	Link          *Link `json:"link,omitempty"`
 	NeedMoreQuota bool  `json:"need_more_quota,omitempty"`
 	VipTypes      []any `json:"vip_types,omitempty"` // TODO maybe list of something?
@@ -290,11 +290,11 @@ type FileApp struct {
 	NeedMoreQuota bool     `json:"need_more_quota,omitempty"`
 	IconLink      string   `json:"icon_link,omitempty"`
 	IsDefault     bool     `json:"is_default,omitempty"`
-	Params        struct{} `json:"params"` // TODO
+	Params        struct{} `json:"params,omitempty"` // TODO
 	CategoryIDs   []any    `json:"category_ids,omitempty"`
 	AdSceneType   int      `json:"ad_scene_type,omitempty"`
 	Space         string   `json:"space,omitempty"`
-	Links         struct{} `json:"links"` // TODO
+	Links         struct{} `json:"links,omitempty"` // TODO
 }
 
 // ------------------------------------------------------------
@@ -320,8 +320,8 @@ type Task struct {
 	FileName    string `json:"file_name,omitempty"`
 	FileSize    string `json:"file_size,omitempty"`
 	Message     string `json:"message,omitempty"` // e.g. "Saving"
-	CreatedTime Time   `json:"created_time"`
-	UpdatedTime Time   `json:"updated_time"`
+	CreatedTime Time   `json:"created_time,omitempty"`
+	UpdatedTime Time   `json:"updated_time,omitempty"`
 	ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
 	Phase       string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
 	Progress    int    `json:"progress,omitempty"`
@@ -368,7 +368,7 @@ type ResumableParams struct {
 	AccessKeySecret string `json:"access_key_secret,omitempty"`
 	Bucket          string `json:"bucket,omitempty"`
 	Endpoint        string `json:"endpoint,omitempty"`
-	Expiration      Time   `json:"expiration"`
+	Expiration      Time   `json:"expiration,omitempty"`
 	Key             string `json:"key,omitempty"`
 	SecurityToken   string `json:"security_token,omitempty"`
 }
@@ -409,7 +409,7 @@ type About struct {
 	Kind      string   `json:"kind,omitempty"` // "drive#about"
 	Quota     *Quota   `json:"quota,omitempty"`
 	ExpiresAt string   `json:"expires_at,omitempty"`
-	Quotas    struct{} `json:"quotas"` // maybe []*Quota?
+	Quotas    struct{} `json:"quotas,omitempty"` // maybe []*Quota?
 }
 
 // Quota informs drive quota
@@ -445,8 +445,8 @@ type User struct {
 	PhoneNumber       string `json:"phone_number,omitempty"`
 	Password          string `json:"password,omitempty"` // "SET" if configured
 	Status            string `json:"status,omitempty"` // "ACTIVE"
-	CreatedAt         Time   `json:"created_at"`
-	PasswordUpdatedAt Time   `json:"password_updated_at"`
+	CreatedAt         Time   `json:"created_at,omitempty"`
+	PasswordUpdatedAt Time   `json:"password_updated_at,omitempty"`
 }
 
 // UserProvider details third-party authentication
@@ -464,11 +464,11 @@ type VIP struct {
 	Message     string `json:"message,omitempty"`
 	RedirectURI string `json:"redirect_uri,omitempty"`
 	Data        struct {
-		Expire Time   `json:"expire"`
+		Expire Time   `json:"expire,omitempty"`
 		Status string `json:"status,omitempty"` // "invalid" or "ok"
 		Type   string `json:"type,omitempty"` // "novip" or "platinum"
 		UserID string `json:"user_id,omitempty"` // same as User.Sub
-	} `json:"data"`
+	} `json:"data,omitempty"`
 }
 
 // DecompressResult is a response to RequestDecompress
@@ -538,7 +538,7 @@ type CaptchaToken struct {
 	CaptchaToken string `json:"captcha_token"`
 	ExpiresIn    int64  `json:"expires_in"` // currently 300s
 	// API doesn't provide Expiry field and thus it should be populated from ExpiresIn on retrieval
-	Expiry time.Time `json:"expiry"`
+	Expiry time.Time `json:"expiry,omitempty"`
 	URL    string    `json:"url,omitempty"` // a link for users to solve captcha
 }
 
@@ -1384,7 +1384,7 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
 	for i := range iVal.NumField() {
 		params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
 	}
-	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name, "application/octet-stream")
+	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name)
 	if err != nil {
 		return fmt.Errorf("failed to make multipart upload: %w", err)
 	}
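Aside: the reflection loop above flattens an API struct into form parameters by using each field's `json` tag as the parameter name. A self-contained sketch of the same technique on an invented struct:

```go
package main

import (
	"fmt"
	"net/url"
	"reflect"
)

// form is invented for the example; string fields only, mirroring the pattern above.
type form struct {
	Bucket string `json:"bucket"`
	Key    string `json:"key"`
}

func main() {
	f := form{Bucket: "b1", Key: "obj/1"}
	iVal := reflect.ValueOf(f)
	iTyp := iVal.Type()

	params := url.Values{}
	for i := range iVal.NumField() {
		// Use the json tag as the form field name and the field value as its value.
		params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
	}
	fmt.Println(params.Encode()) // bucket=b1&key=obj%2F1
}
```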
@@ -1608,7 +1608,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
 // ------------------------------------------------------------
 
 // add offline download task for url
-func (f *Fs) addURL(ctx context.Context, url, name, path string) (*api.Task, error) {
+func (f *Fs) addURL(ctx context.Context, url, path string) (*api.Task, error) {
 	req := api.RequestNewTask{
 		Kind:       api.KindOfFile,
 		UploadType: "UPLOAD_TYPE_URL",
@@ -1617,9 +1617,6 @@ func (f *Fs) addURL(ctx context.Context, url, path string) (*api.Task, err
 		},
 		FolderType: "DOWNLOAD",
 	}
-	if name != "" {
-		req.Name = f.opt.Enc.FromStandardName(name)
-	}
 	if parentID, err := f.dirCache.FindDir(ctx, path, false); err == nil {
 		req.ParentID = parentIDForRequest(parentID)
 		req.FolderType = ""
@@ -1684,18 +1681,14 @@ var commandHelp = []fs.CommandHelp{{
 	Short: "Add offline download task for url.",
 	Long: `This command adds offline download task for url.
 
-Usage examples:
+Usage example:
 
 ` + "```console" + `
 rclone backend addurl pikpak:dirpath url
-rclone backend addurl pikpak:dirpath url -o name=custom_filename.zip
 ` + "```" + `
 
 Downloads will be stored in 'dirpath'. If 'dirpath' is invalid,
 download will fallback to default 'My Pack' folder.`,
-	Opts: map[string]string{
-		"name": "Custom filename for the downloaded file.",
-	},
 }, {
 	Name:  "decompress",
 	Short: "Request decompress of a file/files in a folder.",
@@ -1739,11 +1732,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 		if len(arg) != 1 {
 			return nil, errors.New("need exactly 1 argument")
 		}
-		filename := ""
-		if name, ok := opt["name"]; ok {
-			filename = name
-		}
-		return f.addURL(ctx, arg[0], filename, "")
+		return f.addURL(ctx, arg[0], "")
 	case "decompress":
 		filename := ""
 		if len(arg) > 0 {
@@ -10,8 +10,8 @@ import (
 	"strings"
 	"time"
 
-	protonDriveAPI "github.com/rclone/Proton-API-Bridge"
-	"github.com/rclone/go-proton-api"
+	protonDriveAPI "github.com/henrybear327/Proton-API-Bridge"
+	"github.com/henrybear327/go-proton-api"
 
 	"github.com/pquerna/otp/totp"
 
@@ -170,7 +170,6 @@ In `backend/s3/provider/YourProvider.yaml`
 	UseUnsignedPayload *bool  `yaml:"use_unsigned_payload,omitempty"`
 	UseXID             *bool  `yaml:"use_x_id,omitempty"`
 	SignAcceptEncoding *bool  `yaml:"sign_accept_encoding,omitempty"`
-	EtagIsNotMD5       *bool  `yaml:"etag_is_not_md5,omitempty"`
 	CopyCutoff         *int64 `yaml:"copy_cutoff,omitempty"`
 	MaxUploadParts     *int   `yaml:"max_upload_parts,omitempty"`
 	MinChunkSize       *int64 `yaml:"min_chunk_size,omitempty"`
@@ -1,15 +0,0 @@
-name: BizflyCloud
-description: Bizfly Cloud Simple Storage
-region:
-  hn: Ha Noi
-  hcm: Ho Chi Minh
-endpoint:
-  hn.ss.bfcplatform.vn: Hanoi endpoint
-  hcm.ss.bfcplatform.vn: Ho Chi Minh endpoint
-acl: {}
-bucket_acl: true
-quirks:
-  force_path_style: true
-  list_url_encode: false
-  use_multipart_etag: false
-  use_already_exists: false
@@ -1,26 +0,0 @@
-name: Fastly
-description: Fastly Object Storage
-region:
-  au-east-1: AU East 1
-  eu-central: EU Central
-  eu-south-1: EU South 1
-  jp-central-1: JP Central 1
-  uk-east-1: UK East 1
-  us-central-1: US Central 1
-  us-east: US East
-  us-west: US West
-endpoint:
-  au-east-1.object.fastlystorage.app: AU East 1
-  eu-central.object.fastlystorage.app: EU Central
-  eu-south-1.object.fastlystorage.app: EU South 1
-  jp-central-1.object.fastlystorage.app: JP Central 1
-  uk-east-1.object.fastlystorage.app: UK East 1
-  us-central-1.object.fastlystorage.app: US Central 1
-  us-east.object.fastlystorage.app: US East
-  us-west.object.fastlystorage.app: US West
-quirks:
-  force_path_style: true
-  use_already_exists: false
-  use_multipart_etag: false
-  use_multipart_uploads: false
-  etag_is_not_md5: true
@@ -15,7 +15,7 @@ endpoint:
 acl: {}
 bucket_acl: true
 quirks:
-  list_version: 2
+  list_version: 1
   force_path_style: true
   list_url_encode: false
   use_multipart_etag: false
@@ -3,17 +3,11 @@ description: IONOS Cloud
 region:
   de: Frankfurt, Germany
   eu-central-2: Berlin, Germany
-  eu-central-3: Berlin, Germany
-  eu-central-4: Frankfurt, Germany
   eu-south-2: Logrono, Spain
-  us-central-1: Lenexa, USA
 endpoint:
-  s3.eu-central-1.ionoscloud.com: Frankfurt, Germany
-  s3.eu-central-2.ionoscloud.com: Berlin, Germany
-  s3.eu-central-3.ionoscloud.com: Berlin, Germany
-  s3.eu-central-4.ionoscloud.com: Frankfurt, Germany
-  s3.eu-south-2.ionoscloud.com: Logrono, Spain
-  s3.us-central-1.ionoscloud.com: Lenexa, USA
+  s3-eu-central-1.ionoscloud.com: Frankfurt, Germany
+  s3-eu-central-2.ionoscloud.com: Berlin, Germany
+  s3-eu-south-2.ionoscloud.com: Logrono, Spain
 acl: {}
 bucket_acl: true
 quirks:
@@ -18,11 +18,11 @@ storage_class:
   GLACIER: |-
     Archived storage.
     Prices are lower, but it needs to be restored first to be accessed.
-    Available in the FR-PAR region only.
+    Available in FR-PAR and NL-AMS regions.
   ONEZONE_IA: |-
     One Zone - Infrequent Access.
     A good choice for storing secondary backup copies or easily re-creatable data.
-    Available in all regions.
+    Available in the FR-PAR region only.
 bucket_acl: true
 quirks:
   max_upload_parts: 1000
14 backend/s3/provider/StackPath.yaml Normal file
@@ -0,0 +1,14 @@
+name: StackPath
+description: StackPath Object Storage
+region: {}
+endpoint:
+  s3.us-east-2.stackpathstorage.com: US East Endpoint
+  s3.us-west-1.stackpathstorage.com: US West Endpoint
+  s3.eu-central-1.stackpathstorage.com: EU Endpoint
+acl: {}
+bucket_acl: true
+quirks:
+  list_version: 1
+  force_path_style: true
+  list_url_encode: false
+  use_already_exists: false
@@ -1,9 +0,0 @@
-name: Zadara
-description: Zadara Object Storage
-region:
-  us-east-1: |-
-    The default region.
-    Leave location constraint empty.
-endpoint: {}
-quirks:
-  force_path_style: true
@@ -32,7 +32,6 @@ type Quirks struct {
 	UseUnsignedPayload *bool  `yaml:"use_unsigned_payload,omitempty"`
 	UseXID             *bool  `yaml:"use_x_id,omitempty"`
 	SignAcceptEncoding *bool  `yaml:"sign_accept_encoding,omitempty"`
-	EtagIsNotMD5       *bool  `yaml:"etag_is_not_md5,omitempty"`
 	CopyCutoff         *int64 `yaml:"copy_cutoff,omitempty"`
 	MaxUploadParts     *int   `yaml:"max_upload_parts,omitempty"`
 	MinChunkSize       *int64 `yaml:"min_chunk_size,omitempty"`
backend/s3/s3.go (386 diff lines)
@@ -828,107 +828,6 @@ use |-vv| to see the debug level logs.
 	}, {
 		Name: "ibm_resource_instance_id",
 		Help: "IBM service instance id",
-	}, {
-		Name: "object_lock_mode",
-		Help: `Object Lock mode to apply when uploading or copying objects.
-
-Set this to apply Object Lock retention mode to objects.
-If not set, no Object Lock mode is applied (even with --metadata).
-
-Note: To enable Object Lock retention, you must set BOTH object_lock_mode
-AND object_lock_retain_until_date. Setting only one has no effect.
-
-- GOVERNANCE: Set Object Lock mode to GOVERNANCE
-- COMPLIANCE: Set Object Lock mode to COMPLIANCE
-- copy: Copy the mode from the source object (requires --metadata)
-
-See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html`,
-		Advanced: true,
-		Examples: []fs.OptionExample{{
-			Value: "GOVERNANCE",
-			Help:  "Set Object Lock mode to GOVERNANCE",
-		}, {
-			Value: "COMPLIANCE",
-			Help:  "Set Object Lock mode to COMPLIANCE",
-		}, {
-			Value: "copy",
-			Help:  "Copy from source object (requires --metadata)",
-		}},
-	}, {
-		Name: "object_lock_retain_until_date",
-		Help: `Object Lock retention until date to apply when uploading or copying objects.
-
-Set this to apply Object Lock retention date to objects.
-If not set, no retention date is applied (even with --metadata).
-
-Note: To enable Object Lock retention, you must set BOTH object_lock_mode
-AND object_lock_retain_until_date. Setting only one has no effect.
-
-Accepts:
-- RFC 3339 format: 2030-01-02T15:04:05Z
-- Duration from now: 365d, 1y, 6M (days, years, months)
-- copy: Copy the date from the source object (requires --metadata)
-
-See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html`,
-		Advanced: true,
-		Examples: []fs.OptionExample{{
-			Value: "copy",
-			Help:  "Copy from source object (requires --metadata)",
-		}, {
-			Value: "2030-01-01T00:00:00Z",
-			Help:  "Set specific date (RFC 3339 format)",
-		}, {
-			Value: "365d",
-			Help:  "Set retention for 365 days from now",
-		}, {
-			Value: "1y",
-			Help:  "Set retention for 1 year from now",
-		}},
-	}, {
-		Name: "object_lock_legal_hold_status",
-		Help: `Object Lock legal hold status to apply when uploading or copying objects.
-
-Set this to apply Object Lock legal hold to objects.
-If not set, no legal hold is applied (even with --metadata).
-
-Note: Legal hold is independent of retention and can be set separately.
-
-- ON: Enable legal hold
-- OFF: Disable legal hold
-- copy: Copy the legal hold status from the source object (requires --metadata)
-
-See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html`,
-		Advanced: true,
-		Examples: []fs.OptionExample{{
-			Value: "ON",
-			Help:  "Enable legal hold",
-		}, {
-			Value: "OFF",
-			Help:  "Disable legal hold",
-		}, {
-			Value: "copy",
-			Help:  "Copy from source object (requires --metadata)",
-		}},
-	}, {
-		Name:     "bypass_governance_retention",
-		Help:     `Allow deleting or modifying objects locked with GOVERNANCE mode.`,
-		Default:  false,
-		Advanced: true,
-	}, {
-		Name:    "bucket_object_lock_enabled",
-		Help:    `Enable Object Lock when creating new buckets.`,
-		Default: false,
-	}, {
-		Name: "object_lock_set_after_upload",
-		Help: `Set Object Lock via separate API calls after upload.
-
-Use this for S3-compatible providers that don't support setting Object Lock
-headers during PUT operations. When enabled, Object Lock is set via separate
-PutObjectRetention and PutObjectLegalHold API calls after the upload completes.
-
-This adds extra API calls per object, so only enable if your provider requires it.`,
-		Default:  false,
-		Advanced: true,
 	},
 }}))
 }
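The help text for these options stresses that retention only takes effect when both object_lock_mode and object_lock_retain_until_date are set; rclone itself silently ignores an incomplete pair. A hedged sketch of how a caller outside rclone might enforce the pairing up front (illustrative only, not code from this diff):

package main

import (
    "errors"
    "fmt"
)

// validateObjectLockPair checks the pairing rule the help text describes.
// rclone does not error here; this stricter check is purely illustrative.
func validateObjectLockPair(mode, retainUntil string) error {
    if (mode == "") != (retainUntil == "") {
        return errors.New("object_lock_mode and object_lock_retain_until_date must be set together")
    }
    return nil
}

func main() {
    fmt.Println(validateObjectLockPair("GOVERNANCE", ""))                     // pairing error
    fmt.Println(validateObjectLockPair("GOVERNANCE", "2030-01-01T00:00:00Z")) // <nil>
}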
@@ -1023,21 +922,6 @@ var systemMetadataInfo = map[string]fs.MetadataHelp{
 		Example:  "2006-01-02T15:04:05.999999999Z07:00",
 		ReadOnly: true,
 	},
-	"object-lock-mode": {
-		Help:    "Object Lock mode: GOVERNANCE or COMPLIANCE",
-		Type:    "string",
-		Example: "GOVERNANCE",
-	},
-	"object-lock-retain-until-date": {
-		Help:    "Object Lock retention until date",
-		Type:    "RFC 3339",
-		Example: "2030-01-02T15:04:05Z",
-	},
-	"object-lock-legal-hold-status": {
-		Help:    "Object Lock legal hold status: ON or OFF",
-		Type:    "string",
-		Example: "OFF",
-	},
 }
 
 // Options defines the configuration for this backend
@@ -1108,12 +992,6 @@ type Options struct {
 	IBMInstanceID             string      `config:"ibm_resource_instance_id"`
 	UseXID                    fs.Tristate `config:"use_x_id"`
 	SignAcceptEncoding        fs.Tristate `config:"sign_accept_encoding"`
-	ObjectLockMode            string      `config:"object_lock_mode"`
-	ObjectLockRetainUntilDate string      `config:"object_lock_retain_until_date"`
-	ObjectLockLegalHoldStatus string      `config:"object_lock_legal_hold_status"`
-	BypassGovernanceRetention bool        `config:"bypass_governance_retention"`
-	BucketObjectLockEnabled   bool        `config:"bucket_object_lock_enabled"`
-	ObjectLockSetAfterUpload  bool        `config:"object_lock_set_after_upload"`
 }
 
 // Fs represents a remote s3 server
@@ -1158,11 +1036,6 @@ type Object struct {
 	contentDisposition *string // Content-Disposition: header
 	contentEncoding    *string // Content-Encoding: header
 	contentLanguage    *string // Content-Language: header
-
-	// Object Lock metadata
-	objectLockMode            *string    // Object Lock mode: GOVERNANCE or COMPLIANCE
-	objectLockRetainUntilDate *time.Time // Object Lock retention until date
-	objectLockLegalHoldStatus *string    // Object Lock legal hold: ON or OFF
 }
 
 // safely dereference the pointer, returning a zero T if nil
@@ -1183,21 +1056,6 @@ func getHTTPStatusCode(err error) int {
 	return -1
 }
 
-// parseRetainUntilDate parses a retain until date from a string.
-// It accepts RFC 3339 format or duration strings like "365d", "1y", "6m".
-func parseRetainUntilDate(s string) (time.Time, error) {
-	// First try RFC 3339 format
-	if t, err := time.Parse(time.RFC3339, s); err == nil {
-		return t, nil
-	}
-	// Try as a duration from now
-	d, err := fs.ParseDuration(s)
-	if err != nil {
-		return time.Time{}, fmt.Errorf("can't parse %q as RFC 3339 date or duration: %w", s, err)
-	}
-	return time.Now().Add(d), nil
-}
-
 // ------------------------------------------------------------
 
 // Name of the remote (as passed into NewFs)
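For readers outside the rclone tree, here is a standalone sketch of the parsing rule the removed helper implements. It substitutes the stdlib time.ParseDuration for rclone's fs.ParseDuration, so suffixes like 365d and 1y from the help text are not accepted by this version:

package main

import (
    "fmt"
    "time"
)

// parseRetainUntil tries an exact RFC 3339 instant first, then falls back to
// an offset from now. The real helper uses rclone's fs.ParseDuration, which
// also understands "d", "w", "M" and "y" suffixes; stdlib does not.
func parseRetainUntil(s string) (time.Time, error) {
    if t, err := time.Parse(time.RFC3339, s); err == nil {
        return t, nil // exact instant
    }
    d, err := time.ParseDuration(s)
    if err != nil {
        return time.Time{}, fmt.Errorf("can't parse %q as RFC 3339 date or duration: %w", s, err)
    }
    return time.Now().Add(d), nil // offset from now
}

func main() {
    fmt.Println(parseRetainUntil("2030-01-02T15:04:05Z"))
    fmt.Println(parseRetainUntil("24h"))
}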
@@ -1806,10 +1664,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		// MD5 digest of their object data.
 		f.etagIsNotMD5 = true
 	}
-	if provider.Quirks.EtagIsNotMD5 != nil && *provider.Quirks.EtagIsNotMD5 {
-		// Provider always returns ETags that are not MD5 (e.g., mandatory encryption)
-		f.etagIsNotMD5 = true
-	}
 	if opt.DirectoryBucket {
 		// Objects uploaded to directory buckets appear to have random ETags
 		//
@@ -1834,9 +1688,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.Provider == "AWS" {
 		f.features.DoubleSlash = true
 	}
-	if opt.Provider == "Fastly" {
-		f.features.Copy = nil
-	}
 	if opt.Provider == "Rabata" {
 		f.features.Copy = nil
 	}
@@ -2791,9 +2642,8 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
 	}
 	return f.cache.Create(bucket, func() error {
 		req := s3.CreateBucketInput{
 			Bucket: &bucket,
 			ACL:    types.BucketCannedACL(f.opt.BucketACL),
-			ObjectLockEnabledForBucket: &f.opt.BucketObjectLockEnabled,
 		}
 		if f.opt.LocationConstraint != "" {
 			req.CreateBucketConfiguration = &types.CreateBucketConfiguration{
@@ -2912,24 +2762,6 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
 		req.StorageClass = types.StorageClass(f.opt.StorageClass)
 	}
 
-	// Apply Object Lock options via headers (unless ObjectLockSetAfterUpload is set)
-	// "copy" means: keep the value from source (passed via req from prepareUpload/setFrom functions)
-	if !f.opt.ObjectLockSetAfterUpload {
-		if f.opt.ObjectLockMode != "" && !strings.EqualFold(f.opt.ObjectLockMode, "copy") {
-			req.ObjectLockMode = types.ObjectLockMode(strings.ToUpper(f.opt.ObjectLockMode))
-		}
-		if f.opt.ObjectLockRetainUntilDate != "" && !strings.EqualFold(f.opt.ObjectLockRetainUntilDate, "copy") {
-			retainDate, err := parseRetainUntilDate(f.opt.ObjectLockRetainUntilDate)
-			if err != nil {
-				return fmt.Errorf("invalid object_lock_retain_until_date: %w", err)
-			}
-			req.ObjectLockRetainUntilDate = &retainDate
-		}
-		if f.opt.ObjectLockLegalHoldStatus != "" && !strings.EqualFold(f.opt.ObjectLockLegalHoldStatus, "copy") {
-			req.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(strings.ToUpper(f.opt.ObjectLockLegalHoldStatus))
-		}
-	}
-
 	if src.bytes >= int64(f.opt.CopyCutoff) {
 		return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, src)
 	}
@@ -3112,15 +2944,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 
 	setFrom_s3CopyObjectInput_s3PutObjectInput(&req, ui.req)
-	// Use REPLACE directive if metadata is being modified, otherwise S3 ignores our values
-	// This is needed when:
-	// 1. --metadata flag is set
-	// 2. Any Object Lock option is set (to override or explicitly copy)
-	needsReplace := ci.Metadata ||
-		f.opt.ObjectLockMode != "" ||
-		f.opt.ObjectLockRetainUntilDate != "" ||
-		f.opt.ObjectLockLegalHoldStatus != ""
-	if needsReplace {
+	if ci.Metadata {
 		req.MetadataDirective = types.MetadataDirectiveReplace
 	}
 
@@ -3128,21 +2952,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	if err != nil {
 		return nil, err
 	}
-	dstObj, err := f.NewObject(ctx, remote)
-	if err != nil {
-		return nil, err
-	}
-
-	// Set Object Lock via separate API calls if requested
-	if f.opt.ObjectLockSetAfterUpload {
-		if dstObject, ok := dstObj.(*Object); ok {
-			if err := dstObject.setObjectLockAfterUpload(ctx, srcObj); err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	return dstObj, nil
+	return f.NewObject(ctx, remote)
 }
 
 // Hashes returns the supported hash sets.
@@ -4021,19 +3831,6 @@ func (o *Object) setMetaData(resp *s3.HeadObjectOutput) {
 	o.contentEncoding = stringClonePointer(removeAWSChunked(resp.ContentEncoding))
 	o.contentLanguage = stringClonePointer(resp.ContentLanguage)
-
-	// Set Object Lock metadata
-	if resp.ObjectLockMode != "" {
-		mode := string(resp.ObjectLockMode)
-		o.objectLockMode = &mode
-	}
-	if resp.ObjectLockRetainUntilDate != nil {
-		o.objectLockRetainUntilDate = resp.ObjectLockRetainUntilDate
-	}
-	if resp.ObjectLockLegalHoldStatus != "" {
-		status := string(resp.ObjectLockLegalHoldStatus)
-		o.objectLockLegalHoldStatus = &status
-	}
-
 	// If decompressing then size and md5sum are unknown
 	if o.fs.opt.Decompress && deref(o.contentEncoding) == "gzip" {
 		o.bytes = -1
@@ -4752,26 +4549,6 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
 		case "btime":
 			// write as metadata since we can't set it
 			ui.req.Metadata[k] = v
-		case "object-lock-mode":
-			// Only apply if option is set to "copy" and not using after-upload API
-			if strings.EqualFold(o.fs.opt.ObjectLockMode, "copy") && !o.fs.opt.ObjectLockSetAfterUpload {
-				ui.req.ObjectLockMode = types.ObjectLockMode(v)
-			}
-		case "object-lock-retain-until-date":
-			// Only apply if option is set to "copy" and not using after-upload API
-			if strings.EqualFold(o.fs.opt.ObjectLockRetainUntilDate, "copy") && !o.fs.opt.ObjectLockSetAfterUpload {
-				retainDate, err := time.Parse(time.RFC3339, v)
-				if err != nil {
-					fs.Debugf(o, "failed to parse object-lock-retain-until-date %q: %v", v, err)
-				} else {
-					ui.req.ObjectLockRetainUntilDate = &retainDate
-				}
-			}
-		case "object-lock-legal-hold-status":
-			// Only apply if option is set to "copy" and not using after-upload API
-			if strings.EqualFold(o.fs.opt.ObjectLockLegalHoldStatus, "copy") && !o.fs.opt.ObjectLockSetAfterUpload {
-				ui.req.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(v)
-			}
 		default:
 			ui.req.Metadata[k] = v
 		}
@@ -4837,25 +4614,6 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
 	if o.fs.opt.StorageClass != "" {
 		ui.req.StorageClass = types.StorageClass(o.fs.opt.StorageClass)
 	}
-
-	// Apply Object Lock options via headers (unless ObjectLockSetAfterUpload is set)
-	// "copy" means: keep the value from metadata (already applied above in the switch)
-	if !o.fs.opt.ObjectLockSetAfterUpload {
-		if o.fs.opt.ObjectLockMode != "" && !strings.EqualFold(o.fs.opt.ObjectLockMode, "copy") {
-			ui.req.ObjectLockMode = types.ObjectLockMode(strings.ToUpper(o.fs.opt.ObjectLockMode))
-		}
-		if o.fs.opt.ObjectLockRetainUntilDate != "" && !strings.EqualFold(o.fs.opt.ObjectLockRetainUntilDate, "copy") {
-			retainDate, err := parseRetainUntilDate(o.fs.opt.ObjectLockRetainUntilDate)
-			if err != nil {
-				return ui, fmt.Errorf("invalid object_lock_retain_until_date %q: %w", o.fs.opt.ObjectLockRetainUntilDate, err)
-			}
-			ui.req.ObjectLockRetainUntilDate = &retainDate
-		}
-		if o.fs.opt.ObjectLockLegalHoldStatus != "" && !strings.EqualFold(o.fs.opt.ObjectLockLegalHoldStatus, "copy") {
-			ui.req.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(strings.ToUpper(o.fs.opt.ObjectLockLegalHoldStatus))
-		}
-	}
-
 	// Apply upload options
 	for _, option := range options {
 		key, value := option.Header()
@@ -4979,14 +4737,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		}
 		fs.Debugf(o, "Multipart upload Etag: %s OK", wantETag)
 	}
-
-	// Set Object Lock via separate API calls if requested
-	if o.fs.opt.ObjectLockSetAfterUpload {
-		if err := o.setObjectLockAfterUpload(ctx, src); err != nil {
-			return err
-		}
-	}
-
 	return err
 }
|
|
||||||
@@ -5004,9 +4754,6 @@ func (o *Object) Remove(ctx context.Context) error {
|
|||||||
if o.fs.opt.RequesterPays {
|
if o.fs.opt.RequesterPays {
|
||||||
req.RequestPayer = types.RequestPayerRequester
|
req.RequestPayer = types.RequestPayerRequester
|
||||||
}
|
}
|
||||||
if o.fs.opt.BypassGovernanceRetention {
|
|
||||||
req.BypassGovernanceRetention = &o.fs.opt.BypassGovernanceRetention
|
|
||||||
}
|
|
||||||
err := o.fs.pacer.Call(func() (bool, error) {
|
err := o.fs.pacer.Call(func() (bool, error) {
|
||||||
_, err := o.fs.c.DeleteObject(ctx, &req)
|
_, err := o.fs.c.DeleteObject(ctx, &req)
|
||||||
return o.fs.shouldRetry(ctx, err)
|
return o.fs.shouldRetry(ctx, err)
|
||||||
@@ -5014,120 +4761,6 @@ func (o *Object) Remove(ctx context.Context) error {
 	return err
 }
 
-// setObjectRetention sets Object Lock retention on an object via PutObjectRetention API
-//
-// Note: We use smithyhttp.AddContentChecksumMiddleware to ensure Content-MD5 is
-// calculated for the request body. The AWS SDK v2 switched from MD5 to CRC32 as
-// the default checksum algorithm, but some S3-compatible providers (e.g. MinIO)
-// still require Content-MD5 for PutObjectRetention requests.
-// See: https://github.com/aws/aws-sdk-go-v2/discussions/2960
-func (o *Object) setObjectRetention(ctx context.Context, mode types.ObjectLockRetentionMode, retainUntilDate time.Time) error {
-	bucket, bucketPath := o.split()
-	req := s3.PutObjectRetentionInput{
-		Bucket:    &bucket,
-		Key:       &bucketPath,
-		VersionId: o.versionID,
-		Retention: &types.ObjectLockRetention{
-			Mode:            mode,
-			RetainUntilDate: &retainUntilDate,
-		},
-	}
-	if o.fs.opt.RequesterPays {
-		req.RequestPayer = types.RequestPayerRequester
-	}
-	if o.fs.opt.BypassGovernanceRetention {
-		req.BypassGovernanceRetention = &o.fs.opt.BypassGovernanceRetention
-	}
-	return o.fs.pacer.Call(func() (bool, error) {
-		_, err := o.fs.c.PutObjectRetention(ctx, &req,
-			s3.WithAPIOptions(smithyhttp.AddContentChecksumMiddleware))
-		return o.fs.shouldRetry(ctx, err)
-	})
-}
-
-// setObjectLegalHold sets Object Lock legal hold on an object via PutObjectLegalHold API
-//
-// Note: We use smithyhttp.AddContentChecksumMiddleware to ensure Content-MD5 is
-// calculated for the request body. The AWS SDK v2 switched from MD5 to CRC32 as
-// the default checksum algorithm, but some S3-compatible providers (e.g. MinIO)
-// still require Content-MD5 for PutObjectLegalHold requests.
-// See: https://github.com/aws/aws-sdk-go-v2/discussions/2960
-func (o *Object) setObjectLegalHold(ctx context.Context, status types.ObjectLockLegalHoldStatus) error {
-	bucket, bucketPath := o.split()
-	req := s3.PutObjectLegalHoldInput{
-		Bucket:    &bucket,
-		Key:       &bucketPath,
-		VersionId: o.versionID,
-		LegalHold: &types.ObjectLockLegalHold{
-			Status: status,
-		},
-	}
-	if o.fs.opt.RequesterPays {
-		req.RequestPayer = types.RequestPayerRequester
-	}
-	return o.fs.pacer.Call(func() (bool, error) {
-		_, err := o.fs.c.PutObjectLegalHold(ctx, &req,
-			s3.WithAPIOptions(smithyhttp.AddContentChecksumMiddleware))
-		return o.fs.shouldRetry(ctx, err)
-	})
-}
-
-// setObjectLockAfterUpload sets Object Lock via separate API calls after upload
-// This is for S3 providers that don't support Object Lock headers during PUT
-func (o *Object) setObjectLockAfterUpload(ctx context.Context, src fs.ObjectInfo) error {
-	// Determine the mode
-	var mode types.ObjectLockRetentionMode
-	modeOpt := o.fs.opt.ObjectLockMode
-	if strings.EqualFold(modeOpt, "copy") {
-		if srcObj, ok := src.(*Object); ok && srcObj.objectLockMode != nil {
-			mode = types.ObjectLockRetentionMode(*srcObj.objectLockMode)
-		}
-	} else if modeOpt != "" {
-		mode = types.ObjectLockRetentionMode(strings.ToUpper(modeOpt))
-	}
-
-	// Determine the retain until date
-	var retainUntilDate time.Time
-	dateOpt := o.fs.opt.ObjectLockRetainUntilDate
-	if strings.EqualFold(dateOpt, "copy") {
-		if srcObj, ok := src.(*Object); ok && srcObj.objectLockRetainUntilDate != nil {
-			retainUntilDate = *srcObj.objectLockRetainUntilDate
-		}
-	} else if dateOpt != "" {
-		var err error
-		retainUntilDate, err = parseRetainUntilDate(dateOpt)
-		if err != nil {
-			return fmt.Errorf("invalid object_lock_retain_until_date %q: %w", dateOpt, err)
-		}
-	}
-
-	// Set retention if both mode and date are set
-	if mode != "" && !retainUntilDate.IsZero() {
-		if err := o.setObjectRetention(ctx, mode, retainUntilDate); err != nil {
-			return fmt.Errorf("failed to set object retention: %w", err)
-		}
-	}
-
-	// Determine and set legal hold
-	var legalHold types.ObjectLockLegalHoldStatus
-	legalHoldOpt := o.fs.opt.ObjectLockLegalHoldStatus
-	if strings.EqualFold(legalHoldOpt, "copy") {
-		if srcObj, ok := src.(*Object); ok && srcObj.objectLockLegalHoldStatus != nil {
-			legalHold = types.ObjectLockLegalHoldStatus(*srcObj.objectLockLegalHoldStatus)
-		}
-	} else if legalHoldOpt != "" {
-		legalHold = types.ObjectLockLegalHoldStatus(strings.ToUpper(legalHoldOpt))
-	}
-
-	if legalHold != "" {
-		if err := o.setObjectLegalHold(ctx, legalHold); err != nil {
-			return fmt.Errorf("failed to set legal hold: %w", err)
-		}
-	}
-
-	return nil
-}
-
 // MimeType of an Object if known, "" otherwise
 func (o *Object) MimeType(ctx context.Context) string {
 	err := o.readMetaData(ctx)
@@ -5171,7 +4804,7 @@ func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error)
 	if err != nil {
 		return nil, err
 	}
-	metadata = make(fs.Metadata, len(o.meta)+10)
+	metadata = make(fs.Metadata, len(o.meta)+7)
 	for k, v := range o.meta {
 		switch k {
 		case metaMtime:
@@ -5206,15 +4839,6 @@ func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error)
 	setMetadata("content-disposition", o.contentDisposition)
 	setMetadata("content-encoding", o.contentEncoding)
 	setMetadata("content-language", o.contentLanguage)
-
-	// Set Object Lock metadata
-	setMetadata("object-lock-mode", o.objectLockMode)
-	if o.objectLockRetainUntilDate != nil {
-		formatted := o.objectLockRetainUntilDate.Format(time.RFC3339)
-		setMetadata("object-lock-retain-until-date", &formatted)
-	}
-	setMetadata("object-lock-legal-hold-status", o.objectLockLegalHoldStatus)
-
 	metadata["tier"] = o.GetTier()
 
 	return metadata, nil
@@ -498,236 +498,10 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
 	// Purge gets tested later
 }
 
-func (f *Fs) InternalTestObjectLock(t *testing.T) {
-	ctx := context.Background()
-
-	// Create a temporary bucket with Object Lock enabled to test on.
-	// This exercises our BucketObjectLockEnabled option and isolates
-	// the test from the main test bucket.
-	lockBucket := f.rootBucket + "-object-lock-" + random.String(8)
-	lockBucket = strings.ToLower(lockBucket)
-
-	// Try to create bucket with Object Lock enabled
-	objectLockEnabled := true
-	req := s3.CreateBucketInput{
-		Bucket: &lockBucket,
-		ACL:    types.BucketCannedACL(f.opt.BucketACL),
-		ObjectLockEnabledForBucket: &objectLockEnabled,
-	}
-	if f.opt.LocationConstraint != "" {
-		req.CreateBucketConfiguration = &types.CreateBucketConfiguration{
-			LocationConstraint: types.BucketLocationConstraint(f.opt.LocationConstraint),
-		}
-	}
-	err := f.pacer.Call(func() (bool, error) {
-		_, err := f.c.CreateBucket(ctx, &req)
-		return f.shouldRetry(ctx, err)
-	})
-	if err != nil {
-		t.Skipf("Object Lock not supported by this provider: CreateBucket with Object Lock failed: %v", err)
-	}
-
-	// Verify Object Lock is actually enabled on the new bucket.
-	// Some S3-compatible servers (e.g. rclone serve s3) accept the
-	// ObjectLockEnabledForBucket flag but don't actually implement Object Lock.
-	var lockCfg *s3.GetObjectLockConfigurationOutput
-	err = f.pacer.Call(func() (bool, error) {
-		var err error
-		lockCfg, err = f.c.GetObjectLockConfiguration(ctx, &s3.GetObjectLockConfigurationInput{
-			Bucket: &lockBucket,
-		})
-		return f.shouldRetry(ctx, err)
-	})
-	if err != nil || lockCfg.ObjectLockConfiguration == nil ||
-		lockCfg.ObjectLockConfiguration.ObjectLockEnabled != types.ObjectLockEnabledEnabled {
-		_ = f.pacer.Call(func() (bool, error) {
-			_, err := f.c.DeleteBucket(ctx, &s3.DeleteBucketInput{Bucket: &lockBucket})
-			return f.shouldRetry(ctx, err)
-		})
-		t.Skipf("Object Lock not functional on this provider (GetObjectLockConfiguration: %v)", err)
-	}
-
-	// Switch f to use the Object Lock bucket for this test
-	oldBucket := f.rootBucket
-	oldRoot := f.root
-	oldRootDir := f.rootDirectory
-	f.rootBucket = lockBucket
-	f.root = lockBucket
-	f.rootDirectory = ""
-	defer func() {
-		f.rootBucket = oldBucket
-		f.root = oldRoot
-		f.rootDirectory = oldRootDir
-	}()
-
-	// Helper to remove an object with Object Lock protection
-	removeLocked := func(t *testing.T, obj fs.Object) {
-		t.Helper()
-		o := obj.(*Object)
-		// Remove legal hold if present
-		_ = o.setObjectLegalHold(ctx, types.ObjectLockLegalHoldStatusOff)
-		// Enable bypass governance retention for deletion
-		o.fs.opt.BypassGovernanceRetention = true
-		err := obj.Remove(ctx)
-		o.fs.opt.BypassGovernanceRetention = false
-		assert.NoError(t, err)
-	}
-
-	// Clean up the temporary bucket after all sub-tests
-	defer func() {
-		// List and remove all object versions
-		var objectVersions []types.ObjectIdentifier
-		listReq := &s3.ListObjectVersionsInput{Bucket: &lockBucket}
-		for {
-			var resp *s3.ListObjectVersionsOutput
-			err := f.pacer.Call(func() (bool, error) {
-				var err error
-				resp, err = f.c.ListObjectVersions(ctx, listReq)
-				return f.shouldRetry(ctx, err)
-			})
-			if err != nil {
-				t.Logf("Failed to list object versions for cleanup: %v", err)
-				break
-			}
-			for _, v := range resp.Versions {
-				objectVersions = append(objectVersions, types.ObjectIdentifier{
-					Key:       v.Key,
-					VersionId: v.VersionId,
-				})
-			}
-			for _, m := range resp.DeleteMarkers {
-				objectVersions = append(objectVersions, types.ObjectIdentifier{
-					Key:       m.Key,
-					VersionId: m.VersionId,
-				})
-			}
-			if !aws.ToBool(resp.IsTruncated) {
-				break
-			}
-			listReq.KeyMarker = resp.NextKeyMarker
-			listReq.VersionIdMarker = resp.NextVersionIdMarker
-		}
-		if len(objectVersions) > 0 {
-			bypass := true
-			_ = f.pacer.Call(func() (bool, error) {
-				_, err := f.c.DeleteObjects(ctx, &s3.DeleteObjectsInput{
-					Bucket:                    &lockBucket,
-					BypassGovernanceRetention: &bypass,
-					Delete: &types.Delete{
-						Objects: objectVersions,
-						Quiet:   aws.Bool(true),
-					},
-				})
-				return f.shouldRetry(ctx, err)
-			})
-		}
-		_ = f.pacer.Call(func() (bool, error) {
-			_, err := f.c.DeleteBucket(ctx, &s3.DeleteBucketInput{Bucket: &lockBucket})
-			return f.shouldRetry(ctx, err)
-		})
-	}()
-
-	retainUntilDate := time.Now().UTC().Add(24 * time.Hour).Truncate(time.Second)
-
-	t.Run("Retention", func(t *testing.T) {
-		// Set Object Lock options for this test
-		f.opt.ObjectLockMode = "GOVERNANCE"
-		f.opt.ObjectLockRetainUntilDate = retainUntilDate.Format(time.RFC3339)
-		defer func() {
-			f.opt.ObjectLockMode = ""
-			f.opt.ObjectLockRetainUntilDate = ""
-		}()
-
-		// Upload an object with Object Lock retention
-		contents := random.String(100)
-		item := fstest.NewItem("test-object-lock-retention", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
-		obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
-		defer func() {
-			removeLocked(t, obj)
-		}()
-
-		// Read back metadata and verify Object Lock settings
-		o := obj.(*Object)
-		gotMetadata, err := o.Metadata(ctx)
-		require.NoError(t, err)
-
-		assert.Equal(t, "GOVERNANCE", gotMetadata["object-lock-mode"])
-		gotRetainDate, err := time.Parse(time.RFC3339, gotMetadata["object-lock-retain-until-date"])
-		require.NoError(t, err)
-		assert.WithinDuration(t, retainUntilDate, gotRetainDate, time.Second)
-	})
-
-	t.Run("LegalHold", func(t *testing.T) {
-		// Set Object Lock legal hold option
-		f.opt.ObjectLockLegalHoldStatus = "ON"
-		defer func() {
-			f.opt.ObjectLockLegalHoldStatus = ""
-		}()
-
-		// Upload an object with legal hold
-		contents := random.String(100)
-		item := fstest.NewItem("test-object-lock-legal-hold", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
-		obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
-		defer func() {
-			removeLocked(t, obj)
-		}()
-
-		// Verify legal hold is ON
-		o := obj.(*Object)
-		gotMetadata, err := o.Metadata(ctx)
-		require.NoError(t, err)
-		assert.Equal(t, "ON", gotMetadata["object-lock-legal-hold-status"])
-
-		// Set legal hold to OFF
-		err = o.setObjectLegalHold(ctx, types.ObjectLockLegalHoldStatusOff)
-		require.NoError(t, err)
-
-		// Clear cached metadata and re-read
-		o.meta = nil
-		gotMetadata, err = o.Metadata(ctx)
-		require.NoError(t, err)
-		assert.Equal(t, "OFF", gotMetadata["object-lock-legal-hold-status"])
-	})
-
-	t.Run("SetAfterUpload", func(t *testing.T) {
-		// Test the post-upload API path (PutObjectRetention + PutObjectLegalHold)
-		f.opt.ObjectLockSetAfterUpload = true
-		f.opt.ObjectLockMode = "GOVERNANCE"
-		f.opt.ObjectLockRetainUntilDate = retainUntilDate.Format(time.RFC3339)
-		f.opt.ObjectLockLegalHoldStatus = "ON"
-		defer func() {
-			f.opt.ObjectLockSetAfterUpload = false
-			f.opt.ObjectLockMode = ""
-			f.opt.ObjectLockRetainUntilDate = ""
-			f.opt.ObjectLockLegalHoldStatus = ""
-		}()
-
-		// Upload an object - lock applied AFTER upload via separate API calls
-		contents := random.String(100)
-		item := fstest.NewItem("test-object-lock-after-upload", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
-		obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
-		defer func() {
-			removeLocked(t, obj)
-		}()
-
-		// Verify all Object Lock settings were applied
-		o := obj.(*Object)
-		gotMetadata, err := o.Metadata(ctx)
-		require.NoError(t, err)
-
-		assert.Equal(t, "GOVERNANCE", gotMetadata["object-lock-mode"])
-		gotRetainDate, err := time.Parse(time.RFC3339, gotMetadata["object-lock-retain-until-date"])
-		require.NoError(t, err)
-		assert.WithinDuration(t, retainUntilDate, gotRetainDate, time.Second)
-		assert.Equal(t, "ON", gotMetadata["object-lock-legal-hold-status"])
-	})
-}
-
 func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("Metadata", f.InternalTestMetadata)
 	t.Run("NoHead", f.InternalTestNoHead)
 	t.Run("Versions", f.InternalTestVersions)
-	t.Run("ObjectLock", f.InternalTestObjectLock)
 }
 
 var _ fstests.InternalTester = (*Fs)(nil)
@@ -5,7 +5,6 @@ import (
 	"context"
 	"net/http"
 	"testing"
-	"time"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/rclone/rclone/fs"
@@ -93,87 +92,3 @@ var (
 	_ fstests.SetUploadCutoffer = (*Fs)(nil)
 	_ fstests.SetCopyCutoffer   = (*Fs)(nil)
 )
-
-func TestParseRetainUntilDate(t *testing.T) {
-	now := time.Now()
-
-	tests := []struct {
-		name      string
-		input     string
-		wantErr   bool
-		checkFunc func(t *testing.T, result time.Time)
-	}{
-		{
-			name:    "RFC3339 date",
-			input:   "2030-01-15T10:30:00Z",
-			wantErr: false,
-			checkFunc: func(t *testing.T, result time.Time) {
-				expected, _ := time.Parse(time.RFC3339, "2030-01-15T10:30:00Z")
-				assert.Equal(t, expected, result)
-			},
-		},
-		{
-			name:    "RFC3339 date with timezone",
-			input:   "2030-06-15T10:30:00+02:00",
-			wantErr: false,
-			checkFunc: func(t *testing.T, result time.Time) {
-				expected, _ := time.Parse(time.RFC3339, "2030-06-15T10:30:00+02:00")
-				assert.Equal(t, expected, result)
-			},
-		},
-		{
-			name:    "duration days",
-			input:   "365d",
-			wantErr: false,
-			checkFunc: func(t *testing.T, result time.Time) {
-				expected := now.Add(365 * 24 * time.Hour)
-				diff := result.Sub(expected)
-				assert.Less(t, diff.Abs(), 2*time.Second, "result should be ~365 days from now")
-			},
-		},
-		{
-			name:    "duration hours",
-			input:   "24h",
-			wantErr: false,
-			checkFunc: func(t *testing.T, result time.Time) {
-				expected := now.Add(24 * time.Hour)
-				diff := result.Sub(expected)
-				assert.Less(t, diff.Abs(), 2*time.Second, "result should be ~24 hours from now")
-			},
-		},
-		{
-			name:    "duration minutes",
-			input:   "30m",
-			wantErr: false,
-			checkFunc: func(t *testing.T, result time.Time) {
-				expected := now.Add(30 * time.Minute)
-				diff := result.Sub(expected)
-				assert.Less(t, diff.Abs(), 2*time.Second, "result should be ~30 minutes from now")
-			},
-		},
-		{
-			name:    "invalid input",
-			input:   "not-a-date",
-			wantErr: true,
-		},
-		{
-			name:    "empty input",
-			input:   "",
-			wantErr: true,
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			result, err := parseRetainUntilDate(tt.input)
-			if tt.wantErr {
-				require.Error(t, err)
-				return
-			}
-			require.NoError(t, err)
-			if tt.checkFunc != nil {
-				tt.checkFunc(t, result)
-			}
-		})
-	}
-}
@@ -688,7 +688,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
 		"need_idx_progress": {"true"},
 		"replace":           {"1"},
 	}
-	formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename), "application/octet-stream")
+	formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
 	if err != nil {
 		return nil, fmt.Errorf("failed to make multipart upload: %w", err)
 	}
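The arity change above drops the explicit part content type from rclone's internal rest.MultipartUpload. As background, a hedged stdlib sketch of the same distinction: mime/multipart's CreateFormFile always labels a part application/octet-stream, while a hand-built part header lets the caller choose:

package main

import (
    "bytes"
    "fmt"
    "mime/multipart"
    "net/textproto"
)

func main() {
    var buf bytes.Buffer
    w := multipart.NewWriter(&buf)

    // CreateFormFile fixes the part's Content-Type to application/octet-stream.
    part, _ := w.CreateFormFile("file", "example.txt")
    part.Write([]byte("hello"))

    // CreatePart with an explicit header is how a caller picks another type.
    h := make(textproto.MIMEHeader)
    h.Set("Content-Disposition", `form-data; name="file2"; filename="example.bin"`)
    h.Set("Content-Type", "application/octet-stream")
    part2, _ := w.CreatePart(h)
    part2.Write([]byte{0x00, 0x01})

    w.Close()
    fmt.Println(w.FormDataContentType()) // multipart/form-data; boundary=...
}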
@@ -519,12 +519,6 @@ Example:
 	Help: `URL for HTTP CONNECT proxy
 
 Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
-
-Supports the format http://user:pass@host:port, http://host:port, http://host.
-
-Example:
-
-    http://myUser:myPass@proxyhostname.example.com:8000
 `,
 	Advanced: true,
 }, {
@@ -925,8 +919,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		opt.Port = "22"
 	}
 
-	// Set up sshConfig here from opt
-	// **NB** everything else should be setup in NewFsWithConnection
+	// get proxy URL if set
+	if opt.HTTPProxy != "" {
+		proxyURL, err := url.Parse(opt.HTTPProxy)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
+		}
+		f.proxyURL = proxyURL
+	}
+
 	sshConfig := &ssh.ClientConfig{
 		User: opt.User,
 		Auth: []ssh.AuthMethod{},
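The added block is plain net/url parsing; a standalone sketch using the example URL from the removed help text above:

package main

import (
    "fmt"
    "log"
    "net/url"
)

// A minimal sketch of the parse step this hunk relocates into NewFs.
func main() {
    proxyURL, err := url.Parse("http://myUser:myPass@proxyhostname.example.com:8000")
    if err != nil {
        // a malformed value would abort NewFs with a wrapped error
        log.Fatalf("failed to parse HTTP Proxy URL: %v", err)
    }
    fmt.Println(proxyURL.Scheme) // http
    fmt.Println(proxyURL.Host)   // proxyhostname.example.com:8000
}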
@@ -1174,21 +1175,11 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
 	f.mkdirLock = newStringLock()
 	f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
 	f.savedpswd = ""
 
 	// set the pool drainer timer going
 	if f.opt.IdleTimeout > 0 {
 		f.drain = time.AfterFunc(time.Duration(f.opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
 	}
-
-	// get proxy URL if set
-	if opt.HTTPProxy != "" {
-		proxyURL, err := url.Parse(opt.HTTPProxy)
-		if err != nil {
-			return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
-		}
-		f.proxyURL = proxyURL
-	}
-
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
 		SlowHash:                true,
@@ -1258,7 +1249,7 @@
 		fs.Debugf(f, "Failed to resolve path using RealPath: %v", err)
 		cwd, err := c.sftpClient.Getwd()
 		if err != nil {
-			fs.Debugf(f, "Failed to read current directory - using relative paths: %v", err)
+			fs.Debugf(f, "Failed to to read current directory - using relative paths: %v", err)
 		} else {
 			f.absRoot = path.Join(cwd, f.root)
 			fs.Debugf(f, "Relative path joined with current directory to get absolute path %q", f.absRoot)
@@ -163,7 +163,7 @@ func (f *Fs) refreshJWTToken(ctx context.Context) (string, error) {
 	if err != nil {
 		return "", fmt.Errorf("invalid token received from server")
 	}
-	var claims map[string]any
+	var claims map[string]interface{}
 	if err := json.Unmarshal(payload, &claims); err != nil {
 		return "", err
 	}
@@ -182,7 +182,7 @@ func (f *Fs) refreshJWTToken(ctx context.Context) (string, error) {
 	return f.token, nil
 }
 
-func (f *Fs) callAPI(ctx context.Context, method, path string, response any) (*http.Response, error) {
+func (f *Fs) callAPI(ctx context.Context, method, path string, response interface{}) (*http.Response, error) {
 	token, err := f.refreshJWTToken(ctx)
 	if err != nil {
 		return nil, err
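Since Go 1.18, any is a predeclared alias for interface{}, so the two signatures in these hunks are type-identical; the spelled-out form only matters for pre-1.18 toolchains. A minimal demonstration:

package main

import "fmt"

// any and interface{} name the same type; the declarations are interchangeable.
func describeAny(v any) string           { return fmt.Sprintf("%T", v) }
func describeIface(v interface{}) string { return fmt.Sprintf("%T", v) }

func main() {
    fmt.Println(describeAny(42) == describeIface(42)) // true: both print "int"
}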
@@ -7,7 +7,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"maps"
 	"net/http"
 	"net/url"
 	"path"
@@ -204,7 +203,9 @@ func (s *shadeChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, read
 	var uploadRes *http.Response
 	if len(partURL.Headers) > 0 {
 		opts.ExtraHeaders = make(map[string]string)
-		maps.Copy(opts.ExtraHeaders, partURL.Headers)
+		for k, v := range partURL.Headers {
+			opts.ExtraHeaders[k] = v
+		}
 	}
 
 	err = s.f.pacer.Call(func() (bool, error) {
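The right-hand side replaces maps.Copy with a manual loop, presumably so the file builds on toolchains older than Go 1.21, where the maps package first shipped. For map destinations the two forms are equivalent:

package main

import (
    "fmt"
    "maps"
)

func main() {
    src := map[string]string{"Authorization": "Bearer x", "X-Part": "1"}

    dst1 := make(map[string]string)
    maps.Copy(dst1, src) // stdlib form (Go 1.21+)

    dst2 := make(map[string]string)
    for k, v := range src { // loop form used on the new side of the hunk
        dst2[k] = v
    }

    fmt.Println(len(dst1) == len(dst2)) // true
}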
@@ -27,8 +27,8 @@ type Item struct {
 	FileCount  int32     `json:"FileCount,omitempty"`
 	Name       string    `json:"Name,omitempty"`
 	FileName   string    `json:"FileName,omitempty"`
-	CreatedAt  time.Time `json:"CreationDate"`
-	ModifiedAt time.Time `json:"ClientModifiedDate"`
+	CreatedAt  time.Time `json:"CreationDate,omitempty"`
+	ModifiedAt time.Time `json:"ClientModifiedDate,omitempty"`
 	IsHidden   bool      `json:"IsHidden,omitempty"`
 	Size       int64     `json:"FileSizeBytes,omitempty"`
 	Type       string    `json:"odata.type,omitempty"`
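A caveat on this hunk: with encoding/json, omitempty never omits a struct value, so adding it to a time.Time field does not suppress the zero time. A standalone demonstration:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

type item struct {
    CreatedAt time.Time `json:"CreationDate,omitempty"`
}

func main() {
    // encoding/json treats structs as never "empty", so the zero time
    // is still marshalled despite omitempty.
    b, _ := json.Marshal(item{})
    fmt.Println(string(b)) // {"CreationDate":"0001-01-01T00:00:00Z"}
}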
@@ -54,7 +54,7 @@ var SharedOptions = []fs.Option{{
 	Name: "chunk_size",
 	Help: strings.ReplaceAll(`Above this size files will be chunked.
 
-Above this size files will be chunked into a |`+segmentsContainerSuffix+`| container
+Above this size files will be chunked into a a |`+segmentsContainerSuffix+`| container
 or a |`+segmentsDirectory+`| directory. (See the |use_segments_container| option
 for more info). Default for this is 5 GiB which is its maximum value, which
 means only files above this size will be chunked.
backend/uptobox/api/types.go (new file, 171 lines)
@@ -0,0 +1,171 @@
+// Package api provides types used by the Uptobox API.
+package api
+
+import "fmt"
+
+// Error contains the error code and message returned by the API
+type Error struct {
+	Success    bool   `json:"success,omitempty"`
+	StatusCode int    `json:"statusCode,omitempty"`
+	Message    string `json:"message,omitempty"`
+	Data       string `json:"data,omitempty"`
+}
+
+// Error returns a string for the error and satisfies the error interface
+func (e Error) Error() string {
+	out := fmt.Sprintf("api error %d", e.StatusCode)
+	if e.Message != "" {
+		out += ": " + e.Message
+	}
+	if e.Data != "" {
+		out += ": " + e.Data
+	}
+	return out
+}
+
+// FolderEntry represents a Uptobox subfolder when listing folder contents
+type FolderEntry struct {
+	FolderID    uint64 `json:"fld_id"`
+	Description string `json:"fld_descr"`
+	Password    string `json:"fld_password"`
+	FullPath    string `json:"fullPath"`
+	Path        string `json:"fld_name"`
+	Name        string `json:"name"`
+	Hash        string `json:"hash"`
+}
+
+// FolderInfo represents the current folder when listing folder contents
+type FolderInfo struct {
+	FolderID      uint64 `json:"fld_id"`
+	Hash          string `json:"hash"`
+	FileCount     uint64 `json:"fileCount"`
+	TotalFileSize int64  `json:"totalFileSize"`
+}
+
+// FileInfo represents a file when listing folder contents
+type FileInfo struct {
+	Name         string `json:"file_name"`
+	Description  string `json:"file_descr"`
+	Created      string `json:"file_created"`
+	Size         int64  `json:"file_size"`
+	Downloads    uint64 `json:"file_downloads"`
+	Code         string `json:"file_code"`
+	Password     string `json:"file_password"`
+	Public       int    `json:"file_public"`
+	LastDownload string `json:"file_last_download"`
+	ID           uint64 `json:"id"`
+}
+
+// ReadMetadataResponse is the response when listing folder contents
+type ReadMetadataResponse struct {
+	StatusCode int    `json:"statusCode"`
+	Message    string `json:"message"`
+	Data       struct {
+		CurrentFolder  FolderInfo    `json:"currentFolder"`
+		Folders        []FolderEntry `json:"folders"`
+		Files          []FileInfo    `json:"files"`
+		PageCount      int           `json:"pageCount"`
+		TotalFileCount int           `json:"totalFileCount"`
+		TotalFileSize  int64         `json:"totalFileSize"`
+	} `json:"data"`
+}
+
+// UploadInfo is the response when initiating an upload
+type UploadInfo struct {
+	StatusCode int    `json:"statusCode"`
+	Message    string `json:"message"`
+	Data       struct {
+		UploadLink string `json:"uploadLink"`
+		MaxUpload  string `json:"maxUpload"`
+	} `json:"data"`
+}
+
+// UploadResponse is the response to a successful upload
+type UploadResponse struct {
+	Files []struct {
+		Name      string `json:"name"`
+		Size      int64  `json:"size"`
+		URL       string `json:"url"`
+		DeleteURL string `json:"deleteUrl"`
+	} `json:"files"`
+}
+
+// UpdateResponse is a generic response to various action on files (rename/copy/move)
+type UpdateResponse struct {
+	Message    string `json:"message"`
+	StatusCode int    `json:"statusCode"`
+}
+
+// Download is the response when requesting a download link
+type Download struct {
+	StatusCode int    `json:"statusCode"`
+	Message    string `json:"message"`
+	Data       struct {
+		DownloadLink string `json:"dlLink"`
+	} `json:"data"`
+}
+
+// MetadataRequestOptions represents all the options when listing folder contents
+type MetadataRequestOptions struct {
+	Limit       uint64
+	Offset      uint64
+	SearchField string
+	Search      string
+}
+
+// CreateFolderRequest is used for creating a folder
+type CreateFolderRequest struct {
+	Token string `json:"token"`
+	Path  string `json:"path"`
+	Name  string `json:"name"`
+}
+
+// DeleteFolderRequest is used for deleting a folder
+type DeleteFolderRequest struct {
+	Token    string `json:"token"`
+	FolderID uint64 `json:"fld_id"`
+}
+
+// CopyMoveFileRequest is used for moving/copying a file
+type CopyMoveFileRequest struct {
+	Token               string `json:"token"`
+	FileCodes           string `json:"file_codes"`
+	DestinationFolderID uint64 `json:"destination_fld_id"`
+	Action              string `json:"action"`
+}
+
+// MoveFolderRequest is used for moving a folder
+type MoveFolderRequest struct {
+	Token               string `json:"token"`
+	FolderID            uint64 `json:"fld_id"`
+	DestinationFolderID uint64 `json:"destination_fld_id"`
+	Action              string `json:"action"`
+}
+
+// RenameFolderRequest is used for renaming a folder
+type RenameFolderRequest struct {
+	Token    string `json:"token"`
+	FolderID uint64 `json:"fld_id"`
+	NewName  string `json:"new_name"`
+}
+
+// UpdateFileInformation is used for renaming a file
+type UpdateFileInformation struct {
+	Token       string `json:"token"`
+	FileCode    string `json:"file_code"`
+	NewName     string `json:"new_name,omitempty"`
+	Description string `json:"description,omitempty"`
+	Password    string `json:"password,omitempty"`
+	Public      string `json:"public,omitempty"`
+}
+
+// RemoveFileRequest is used for deleting a file
+type RemoveFileRequest struct {
+	Token     string `json:"token"`
+	FileCodes string `json:"file_codes"`
+}
+
+// Token represents the authentication token
+type Token struct {
+	Token string `json:"token"`
+}
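These request types are plain JSON bodies. A standalone sketch of encoding one of them (the field values here are hypothetical, and the type is reproduced locally so the sketch compiles on its own):

package main

import (
    "encoding/json"
    "fmt"
)

// CreateFolderRequest mirrors the type added above; the real one lives in
// backend/uptobox/api.
type CreateFolderRequest struct {
    Token string `json:"token"`
    Path  string `json:"path"`
    Name  string `json:"name"`
}

func main() {
    body, _ := json.Marshal(CreateFolderRequest{
        Token: "secret-token", // hypothetical value
        Path:  "/backups",     // hypothetical value
        Name:  "photos",       // hypothetical value
    })
    fmt.Println(string(body)) // {"token":"secret-token","path":"/backups","name":"photos"}
}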
backend/uptobox/uptobox.go (new file, 1087 lines; diff suppressed because it is too large)

backend/uptobox/uptobox_test.go (new file, 21 lines)
@@ -0,0 +1,21 @@
+// Test Uptobox filesystem interface
+package uptobox_test
+
+import (
+	"testing"
+
+	"github.com/rclone/rclone/backend/uptobox"
+	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/fstest/fstests"
+)
+
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+	if *fstest.RemoteName == "" {
+		*fstest.RemoteName = "TestUptobox:"
+	}
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: *fstest.RemoteName,
+		NilObject:  (*uptobox.Object)(nil),
+	})
+}
@@ -348,7 +348,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
 		ExtraHeaders: map[string]string{
 			"Depth": depth,
 		},
-		CheckRedirect: rest.PreserveMethodRedirectFn,
+		NoRedirect: true,
 	}
 	if f.hasOCMD5 || f.hasOCSHA1 {
 		opts.Body = bytes.NewBuffer(owncloudProps)
@@ -423,7 +423,7 @@ func (f *Fs) filePath(file string) string {
 	if f.opt.Enc != encoder.EncodeZero {
 		subPath = f.opt.Enc.FromStandardPath(subPath)
 	}
-	return rest.URLPathEscapeAll(subPath)
+	return rest.URLPathEscape(subPath)
 }
 
 // dirPath returns a directory path (f.root, dir)
@@ -610,7 +610,7 @@ func (f *Fs) findHeader(headers fs.CommaSepList, find string) bool {
 
 // fetch the bearer token and set it if successful
 func (f *Fs) fetchAndSetBearerToken() error {
-	_, err, _ := f.authSingleflight.Do("bearerToken", func() (any, error) {
+	_, err, _ := f.authSingleflight.Do("bearerToken", func() (interface{}, error) {
 		if len(f.opt.BearerTokenCommand) == 0 {
 			return nil, nil
 		}
@@ -80,35 +80,3 @@ func TestHeaders(t *testing.T) {
 	_, err := f.Features().About(context.Background())
 	require.NoError(t, err)
 }
-
-// TestReservedCharactersInPathAreEscaped verifies that reserved characters
-// like semicolons and equals signs in file paths are percent-encoded in
-// HTTP requests to the WebDAV server (RFC 3986 compliance).
-func TestReservedCharactersInPathAreEscaped(t *testing.T) {
-	var capturedPath string
-
-	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		capturedPath = r.RequestURI
-		// Return a 404 so the NewObject call fails cleanly
-		w.WriteHeader(http.StatusNotFound)
-	})
-	ts := httptest.NewServer(handler)
-	defer ts.Close()
-
-	configfile.Install()
-	m := configmap.Simple{
-		"type": "webdav",
-		"url":  ts.URL,
-	}
-
-	f, err := webdav.NewFs(context.Background(), remoteName, "", m)
-	require.NoError(t, err)
-
-	// Try to access a file with a semicolon in the name.
-	// We expect the request to fail (404), but the path should be escaped.
-	_, _ = f.NewObject(context.Background(), "my;test")
-
-	// The semicolon must be percent-encoded as %3B
-	assert.Contains(t, capturedPath, "my%3Btest", "semicolons in path should be percent-encoded")
-	assert.NotContains(t, capturedPath, "my;test", "raw semicolons should not appear in path")
-}
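The removed test pins down the behaviour that separates URLPathEscapeAll from URLPathEscape in the earlier hunk. Assuming rclone's helpers track the standard library here, the difference is visible with plain net/url: whole-path escaping keeps RFC 3986 sub-delims such as ";" raw, while per-segment escaping percent-encodes them. A minimal sketch:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Escaping a whole path leaves sub-delims like ";" unencoded...
	u := url.URL{Path: "dir/my;test"}
	fmt.Println(u.EscapedPath()) // dir/my;test

	// ...while per-segment escaping percent-encodes them.
	fmt.Println(url.PathEscape("my;test")) // my%3Btest
}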
@@ -256,7 +256,7 @@ type WriteMultiMetadataRequest struct {
 
 // WriteMetadata is used to write item metadata
 type WriteMetadata struct {
-	Attributes WriteAttributes `json:"attributes"`
+	Attributes WriteAttributes `json:"attributes,omitempty"`
 	ID         string          `json:"id,omitempty"`
 	Type       string          `json:"type"`
 }
@@ -817,7 +817,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
 	params.Set("filename", url.QueryEscape(name))
 	params.Set("parent_id", parent)
 	params.Set("override-name-exist", strconv.FormatBool(true))
-	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name, "application/octet-stream")
+	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
 	if err != nil {
 		return nil, fmt.Errorf("failed to make multipart upload: %w", err)
 	}
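The extra argument on the removed side sets an explicit MIME type for the uploaded form part. With the standard library alone, that technique looks roughly like this (a sketch of the general pattern, not rclone's rest.MultipartUpload):

package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"net/textproto"
	"strings"
)

func main() {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)

	// Build a form part with an explicit Content-Type for the payload.
	h := make(textproto.MIMEHeader)
	h.Set("Content-Disposition", `form-data; name="content"; filename="file.bin"`)
	h.Set("Content-Type", "application/octet-stream")
	part, err := w.CreatePart(h)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(part, strings.NewReader("file data")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// The boundary-bearing header value to send with the request.
	fmt.Println(w.FormDataContentType())
}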
@@ -23,7 +23,6 @@ docs = [
     "gui.md",
     "rc.md",
     "overview.md",
-    "tiers.md",
     "flags.md",
     "docker.md",
     "bisync.md",
@@ -44,11 +43,9 @@ docs = [
     "compress.md",
     "combine.md",
     "doi.md",
-    "drime.md",
     "dropbox.md",
     "filefabric.md",
     "filelu.md",
-    "filen.md",
     "filescom.md",
     "ftp.md",
     "gofile.md",
@@ -62,7 +59,6 @@ docs = [
     "imagekit.md",
     "iclouddrive.md",
     "internetarchive.md",
-    "internxt.md",
     "jottacloud.md",
     "koofr.md",
     "linkbox.md",
@@ -93,6 +89,7 @@ docs = [
     "storj.md",
     "sugarsync.md",
     "ulozto.md",
+    "uptobox.md",
     "union.md",
     "webdav.md",
     "yandex.md",
@@ -144,7 +141,7 @@ def read_doc(doc):
     contents = fd.read()
     parts = contents.split("---\n", 2)
     if len(parts) != 3:
-        raise ValueError(f"{doc}: Couldn't find --- markers: found {len(parts)} parts")
+        raise ValueError("Couldn't find --- markers: found %d parts" % len(parts))
     contents = parts[2].strip()+"\n\n"
     # Remove icons
     contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
@@ -1,304 +0,0 @@ (entire file removed)
#!/usr/bin/env python3
"""
Manage the backend yaml files in docs/data/backends

usage: manage_backends.py [-h] {create,features,update,help} [files ...]

Manage rclone backend YAML files.

positional arguments:
  {create,features,update,help}
                        Action to perform
  files                 List of YAML files to operate on

options:
  -h, --help            show this help message and exit
"""
import argparse
import sys
import os
import yaml
import json
import subprocess
import time
import socket
from contextlib import contextmanager
from pprint import pprint

# --- Configuration ---

# The order in which keys should appear in the YAML file
CANONICAL_ORDER = [
    "backend",
    "name",
    "tier",
    "maintainers",
    "features_score",
    "integration_tests",
    "data_integrity",
    "performance",
    "adoption",
    "docs",
    "security",
    "virtual",
    "remote",
    "features",
    "hashes",
    "precision"
]

# Default values for fields when creating/updating
DEFAULTS = {
    "backend": None,
    "name": None,
    "tier": "Tier 4",
    "maintainers": "External",
    "features_score": 0,
    "integration_tests": "Passing",
    "data_integrity": "Other",
    "performance": "High",
    "adoption": "Some use",
    "docs": "Full",
    "security": "High",
    "virtual": False,
    "remote": "TestBackend:",
    "features": [],
    "hashes": [],
    "precision": None
}

# --- Test server management ---

def wait_for_tcp(address_str, delay=1, timeout=2, tries=60):
    """
    Blocks until the specified TCP address (e.g., '172.17.0.3:21') is reachable.
    """
    host, port = address_str.split(":")
    port = int(port)
    print(f"Waiting for {host}:{port}...")
    for tri in range(tries):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(timeout)
            result = sock.connect_ex((host, port))
            if result == 0:
                print(f"Connected to {host}:{port} successfully!")
                break
            else:
                print(f"Failed to connect to {host}:{port} try {tri} !")
        time.sleep(delay)

def parse_init_output(binary_input):
    """
    Parse the output of the init script
    """
    decoded_str = binary_input.decode('utf-8')
    result = {}
    for line in decoded_str.splitlines():
        if '=' in line:
            key, value = line.split('=', 1)
            result[key.strip()] = value.strip()
    return result

@contextmanager
def test_server(remote):
    """Start the test server for remote if needed"""
    remote_name = remote.split(":",1)[0]
    init_script = "fstest/testserver/init.d/" + remote_name
    if not os.path.isfile(init_script):
        yield
        return
    print(f"--- Starting {init_script} ---")
    out = subprocess.check_output([init_script, "start"])
    out = parse_init_output(out)
    pprint(out)
    # Configure the server with environment variables
    env_keys = []
    for key, value in out.items():
        env_key = f"RCLONE_CONFIG_{remote_name.upper()}_{key.upper()}"
        env_keys.append(env_key)
        os.environ[env_key] = value
    for key,var in os.environ.items():
        if key.startswith("RCLON"):
            print(key, var)
    if "_connect" in out:
        wait_for_tcp(out["_connect"])
    try:
        yield
    finally:
        print(f"--- Stopping {init_script} ---")
        subprocess.run([init_script, "stop"], check=True)
        # Remove the env vars
        for env_key in env_keys:
            del os.environ[env_key]

# --- Helper Functions ---

def load_yaml(filepath):
    if not os.path.exists(filepath):
        return {}
    with open(filepath, 'r', encoding='utf-8') as f:
        return yaml.safe_load(f) or {}

def save_yaml(filepath, data):
    # Reconstruct dictionary in canonical order
    ordered_data = {}

    # Add known keys in order
    for key in CANONICAL_ORDER:
        if key in data:
            ordered_data[key] = data[key]

    # Add any other keys that might exist (custom fields)
    for key in data:
        if key not in CANONICAL_ORDER:
            ordered_data[key] = data[key]

    # Ensure features are a sorted list (if present)
    if 'features' in ordered_data and isinstance(ordered_data['features'], list):
        ordered_data['features'].sort()

    with open(filepath, 'w', encoding='utf-8') as f:
        yaml.dump(ordered_data, f, default_flow_style=False, sort_keys=False, allow_unicode=True)
    print(f"Saved {filepath}")

def get_backend_name_from_file(filepath):
    """
    s3.yaml -> S3
    azureblob.yaml -> Azureblob
    """
    basename = os.path.basename(filepath)
    name, _ = os.path.splitext(basename)
    return name.title()

def fetch_rclone_features(remote_str):
    """
    Runs `rclone backend features remote:` and returns the JSON object.
    """
    cmd = ["rclone", "backend", "features", remote_str]
    try:
        with test_server(remote_str):
            result = subprocess.run(cmd, capture_output=True, text=True, check=True)
        return json.loads(result.stdout)
    except subprocess.CalledProcessError as e:
        print(f"Error running rclone: {e.stderr}")
        return None
    except FileNotFoundError:
        print("Error: 'rclone' command not found in PATH.")
        sys.exit(1)

# --- Verbs ---

def do_create(files):
    for filepath in files:
        if os.path.exists(filepath):
            print(f"Skipping {filepath} (already exists)")
            continue

        data = DEFAULTS.copy()
        # Set a default name based on filename
        data['backend'] = get_backend_name_from_file(filepath)
        data['name'] = data['backend'].title()
        data['remote'] = "Test" + data['name'] + ":"
        save_yaml(filepath, data)

def do_update(files):
    for filepath in files:
        if not os.path.exists(filepath):
            print(f"Warning: {filepath} does not exist. Use 'create' first.")
            continue

        data = load_yaml(filepath)
        modified = False

        # Inject the filename as the 'backend'
        file_backend = os.path.splitext(os.path.basename(filepath))[0]

        if data.get('backend') != file_backend:
            data['backend'] = file_backend
            modified = True
            print(f"[{filepath}] Updated backend to: {file_backend}")

        # Add missing default fields
        for key, default_val in DEFAULTS.items():
            if key not in data:
                data[key] = default_val
                modified = True
                print(f"[{filepath}] Added missing field: {key}")

        # Special handling for 'name' if it was just added as None or didn't exist
        if data.get('name') is None:
            data['name'] = get_backend_name_from_file(filepath)
            modified = True
            print(f"[{filepath}] Set default name: {data['name']}")

        if modified:
            save_yaml(filepath, data)
        else:
            # We save anyway to enforce canonical order if the file was messy
            save_yaml(filepath, data)

def do_features(files):
    for filepath in files:
        if not os.path.exists(filepath):
            print(f"Error: {filepath} not found.")
            continue

        data = load_yaml(filepath)
        remote = data.get('remote')

        if not remote:
            print(f"Error: [{filepath}] 'remote' field is missing or empty. Cannot fetch features.")
            continue

        print(f"[{filepath}] Fetching features for remote: '{remote}'...")
        rclone_data = fetch_rclone_features(remote)

        if not rclone_data:
            print(f"Failed to fetch data for {filepath}")
            continue

        # Process Features (Dict -> Sorted List of True keys)
        features_dict = rclone_data.get('Features', {})
        # Filter only true values and sort keys
        feature_list = sorted([k for k, v in features_dict.items() if v])

        # Process Hashes
        hashes_list = rclone_data.get('Hashes', [])

        # Process Precision
        precision = rclone_data.get('Precision')

        # Update data
        data['features'] = feature_list
        data['hashes'] = hashes_list
        data['precision'] = precision

        save_yaml(filepath, data)

# --- Main CLI ---

def main():
    parser = argparse.ArgumentParser(description="Manage rclone backend YAML files.")
    parser.add_argument("verb", choices=["create", "features", "update", "help"], help="Action to perform")
    parser.add_argument("files", nargs="*", help="List of YAML files to operate on")

    args = parser.parse_args()

    if args.verb == "help":
        parser.print_help()
        sys.exit(0)

    if not args.files:
        print("Error: No files specified.")
        parser.print_help()
        sys.exit(1)

    if args.verb == "create":
        do_create(args.files)
    elif args.verb == "update":
        do_update(args.files)
    elif args.verb == "features":
        do_features(args.files)

if __name__ == "__main__":
    main()
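Going by the removed script's docstring and argparse setup, typical invocations would have looked like this (the YAML filename is hypothetical):

    python3 manage_backends.py create docs/data/backends/mybackend.yaml
    python3 manage_backends.py features docs/data/backends/mybackend.yaml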
@@ -147,16 +147,6 @@ func ArchiveExtract(ctx context.Context, dst fs.Fs, dstDir string, src fs.Fs, sr
 	// extract files
 	err = ex.Extract(ctx, in, func(ctx context.Context, f archives.FileInfo) error {
 		remote := f.NameInArchive
-		// Strip leading "./" from archive paths. Tar files created with
-		// relative paths (e.g. "tar -czf archive.tar.gz .") use "./" prefixed
-		// entries. Without stripping, rclone encodes the "." as a full-width
-		// dot character creating a spurious directory. We only strip "./"
-		// specifically to avoid enabling path traversal attacks via "../".
-		remote = strings.TrimPrefix(remote, "./")
-		// If the entry was exactly "./" (the root dir), skip it
-		if remote == "" && f.IsDir() {
-			return nil
-		}
 		if dstDir != "" {
 			remote = path.Join(dstDir, remote)
 		}
@@ -1,62 +0,0 @@ (entire file removed)
//go:build !plan9

package extract

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestStripDotSlashPrefix(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected string
	}{
		{
			name:     "strip leading dot-slash from file",
			input:    "./file.txt",
			expected: "file.txt",
		},
		{
			name:     "strip leading dot-slash from nested path",
			input:    "./subdir/file.txt",
			expected: "subdir/file.txt",
		},
		{
			name:     "no prefix unchanged",
			input:    "file.txt",
			expected: "file.txt",
		},
		{
			name:     "nested path unchanged",
			input:    "dir/file.txt",
			expected: "dir/file.txt",
		},
		{
			name:     "dot-dot-slash NOT stripped (path traversal safety)",
			input:    "../etc/passwd",
			expected: "../etc/passwd",
		},
		{
			name:     "dot-slash directory entry becomes empty",
			input:    "./",
			expected: "",
		},
		{
			name:     "only single leading dot-slash stripped",
			input:    "././file.txt",
			expected: "./file.txt",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// This mirrors the stripping logic in ArchiveExtract
			got := strings.TrimPrefix(tc.input, "./")
			assert.Equal(t, tc.expected, got)
		})
	}
}
@@ -92,10 +92,10 @@ func TestCountWriterConcurrent(t *testing.T) {
 
 	var wg sync.WaitGroup
 	wg.Add(goroutines)
-	for range goroutines {
+	for g := 0; g < goroutines; g++ {
 		go func() {
 			defer wg.Done()
-			for range loops {
+			for i := 0; i < loops; i++ {
 				n, err := cw.Write(data)
 				assert.NoError(t, err)
 				assert.Equal(t, chunkSize, n)
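This hunk (like waitFor further down) trades Go 1.22's range-over-int loops for the classic three-clause form, matching the older toolchain this branch builds with. The two spellings iterate identically; a minimal sketch:

package main

import "fmt"

func main() {
	// Go 1.22+ integer range...
	for i := range 3 {
		fmt.Println(i) // 0, 1, 2
	}
	// ...is equivalent to the classic form used on the + side of the diff.
	for i := 0; i < 3; i++ {
		fmt.Println(i)
	}
}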
@@ -162,7 +162,7 @@ var commandDefinition = &cobra.Command{
 	Long: longHelp,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.58",
-		"groups": "Filter,Copy,Important,Sync",
+		"groups": "Filter,Copy,Important",
 	},
 	RunE: func(command *cobra.Command, args []string) error {
 		// NOTE: avoid putting too much handling here, as it won't apply to the rc.
@@ -128,7 +128,9 @@ func (b *bisyncRun) startLockRenewal() func() {
 	}
 	stopLockRenewal := make(chan struct{})
 	var wg sync.WaitGroup
-	wg.Go(func() {
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
 		ticker := time.NewTicker(time.Duration(b.opt.MaxLock) - time.Minute)
 		for {
 			select {
@@ -139,7 +141,7 @@ func (b *bisyncRun) startLockRenewal() func() {
 				return
 			}
 		}
-	})
+	}()
 	return func() {
 		close(stopLockRenewal)
 		wg.Wait()
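Several hunks in this compare replace sync.WaitGroup.Go (new in Go 1.25) with the explicit Add/Done pattern, consistent with the branch building on an older toolchain. The two forms are equivalent; a minimal sketch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Go 1.25+: wg.Go wraps the Add(1)/go/Done bookkeeping.
	wg.Go(func() {
		fmt.Println("worker A")
	})

	// Pre-1.25 equivalent, as on the + side of these hunks.
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("worker B")
	}()

	wg.Wait()
}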
@@ -361,7 +361,9 @@ func StartStats() func() {
 	}
 	stopStats := make(chan struct{})
 	var wg sync.WaitGroup
-	wg.Go(func() {
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
 		ticker := time.NewTicker(*statsInterval)
 		for {
 			select {
@@ -372,7 +374,7 @@ func StartStats() func() {
 				return
 			}
 		}
-	})
+	}()
 	return func() {
 		close(stopStats)
 		wg.Wait()
@@ -1,4 +1,4 @@
-//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || (openbsd && cgo) || windows)
+//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
 
 package cmount
 
@@ -6,7 +6,6 @@ import (
 	"io"
 	"os"
 	"path"
-	"runtime"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -211,12 +210,6 @@ func (fsys *FS) Readdir(dirPath string,
 	// We can't seek in directories and FUSE should know that so
 	// return an error if ofst is ever set.
 	if ofst > 0 {
-		// However openbsd doesn't seem to know this - perhaps a bug in its
-		// FUSE implementation or a bug in cgofuse?
-		// See: https://github.com/billziss-gh/cgofuse/issues/49
-		if runtime.GOOS == "openbsd" {
-			return 0
-		}
 		return -fuse.ESPIPE
 	}
 
@@ -1,4 +1,4 @@
-//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || (openbsd && cgo) || windows)
+//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
 
 // Package cmount implements a FUSE mounting system for rclone remotes.
 //
@@ -8,9 +8,9 @@ package cmount
 import (
 	"errors"
 	"fmt"
+	"strings"
 	"os"
 	"runtime"
-	"strings"
 	"time"
 
 	"github.com/rclone/rclone/cmd/mountlib"
@@ -59,14 +59,12 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
 	} else {
 		options = append(options, "-o", "fsname="+device)
 		options = append(options, "-o", "subtype=rclone")
-		if runtime.GOOS != "openbsd" {
-			options = append(options, "-o", fmt.Sprintf("max_readahead=%d", opt.MaxReadAhead))
-			// This causes FUSE to supply O_TRUNC with the Open
-			// call which is more efficient for cmount. However
-			// it does not work with cgofuse on Windows with
-			// WinFSP so cmount must work with or without it.
-			options = append(options, "-o", "atomic_o_trunc")
-		}
+		options = append(options, "-o", fmt.Sprintf("max_readahead=%d", opt.MaxReadAhead))
+		// This causes FUSE to supply O_TRUNC with the Open
+		// call which is more efficient for cmount. However
+		// it does not work with cgofuse on Windows with
+		// WinFSP so cmount must work with or without it.
+		options = append(options, "-o", "atomic_o_trunc")
 		if opt.DaemonTimeout != 0 {
 			options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(time.Duration(opt.DaemonTimeout).Seconds())))
 		}
@@ -108,7 +106,7 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
 func waitFor(fn func() bool) (ok bool) {
 	const totalWait = 10 * time.Second
 	const individualWait = 10 * time.Millisecond
-	for range int(totalWait / individualWait) {
+	for i := 0; i < int(totalWait/individualWait); i++ {
 		ok = fn()
 		if ok {
 			return ok
@@ -1,4 +1,4 @@
-//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || (openbsd && cgo) || windows) && (!race || !windows)
+//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows) && (!race || !windows)
 
 // Package cmount implements a FUSE mounting system for rclone remotes.
 //
@@ -1,4 +1,4 @@
-//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (openbsd && cgo && cmount) || (windows && cmount))
+//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (windows && cmount))
 
 // Package cmount implements a FUSE mounting system for rclone remotes.
 //
@@ -70,15 +70,6 @@ Note that |--stdout| and |--print-filename| are incompatible with |--urls|.
 This will do |--transfers| copies in parallel. Note that if |--auto-filename|
 is desired for all URLs then a file with only URLs and no filename can be used.
 
-Each FILENAME in the CSV file can start with a relative path which will be appended
-to the destination path provided at the command line. For example, running the command
-shown above with the following CSV file will write two files to the destination:
-|remote:dir/local/path/bar.json| and |remote:dir/another/local/directory/qux.json|
-
-|||csv
-https://example.org/foo/bar.json,local/path/bar.json
-https://example.org/qux/baz.json,another/local/directory/qux.json
-|||
 
 ### Troubleshooting
 
 If you can't get |rclone copyurl| to work then here are some things you can try:
@@ -341,7 +341,7 @@ func (h *testState) preconfigureServer() {
 // The `\\?\` prefix tells Windows APIs to pass strings unmodified to the
 // filesystem without additional parsing [1]. Our workaround is roughly to add
 // the prefix to whichever parameter doesn't have it (when the OS is Windows).
-// I'm not sure this generalizes, but it works for the kinds of inputs we're
+// I'm not sure this generalizes, but it works for the the kinds of inputs we're
 // throwing at it.
 //
 // [1]: https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
@@ -97,7 +97,7 @@ with the following options:
 - If ` + "`--files-only`" + ` is specified then files will be returned only,
 no directories.
 
-If ` + "`--stat`" + ` is set then the output is not an array of items,
+If ` + "`--stat`" + ` is set then the the output is not an array of items,
 but instead a single JSON blob will be returned about the item pointed to.
 This will return an error if the item isn't found, however on bucket based
 backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will
@@ -66,7 +66,7 @@ at all, then 1 PiB is set as both the total and the free size.
 ### Installing on Windows
 
 To run `rclone @ on Windows`, you will need to
-download and install [WinFsp](https://winfsp.dev).
+download and install [WinFsp](http://www.secfs.net/winfsp/).
 
 [WinFsp](https://github.com/winfsp/winfsp) is an open-source
 Windows File System Proxy which makes it easy to write user space file
@@ -324,7 +324,7 @@ full new copy of the file.
 When mounting with `--read-only`, attempts to write to files will fail *silently*
 as opposed to with a clear warning as in macFUSE.
 
-### Mounting on Linux
+## Mounting on Linux
 
 On newer versions of Ubuntu, you may encounter the following error when running
 `rclone mount`:
@@ -71,7 +71,7 @@ rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=m
 rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
 ` + "```" + `
 
-The vfsOpt are as described in options/get and can be seen in the
+The vfsOpt are as described in options/get and can be seen in the the
 "vfs" section when running and the mountOpt can be seen in the "mount" section:
 
 ` + "```console" + `
@@ -34,7 +34,7 @@ argument by passing a hyphen as an argument. This will use the first
 line of STDIN as the password not including the trailing newline.
 
 ` + "```console" + `
-echo 'secretpassword' | rclone obscure -
+echo "secretpassword" | rclone obscure -
 ` + "```" + `
 
 If there is no data on STDIN to read, rclone obscure will default to
@@ -41,7 +41,9 @@ func startProgress() func() {
 	}
 
 	var wg sync.WaitGroup
-	wg.Go(func() {
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
 		progressInterval := defaultProgressInterval
 		if ShowStats() && *statsInterval > 0 {
 			progressInterval = *statsInterval
@@ -63,7 +65,7 @@ func startProgress() func() {
 				return
 			}
 		}
-	})
+	}()
 	return func() {
 		close(stopStats)
 		wg.Wait()
Binary file not shown (704 B image, removed).
@@ -3,7 +3,6 @@ package http
 
 import (
 	"context"
-	_ "embed"
 	"errors"
 	"fmt"
 	"io"
@@ -58,9 +57,6 @@ var DefaultOpt = Options{
 // Opt is options set by command line flags
 var Opt = DefaultOpt
 
-//go:embed favicon.png
-var faviconData []byte
-
 func init() {
 	fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "http", Opt: &Opt, Options: OptionsInfo})
 }
@@ -202,11 +198,9 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
 
 	router := s.server.Router()
 	router.Use(
-		middleware.Compress(5),
 		middleware.SetHeader("Accept-Ranges", "bytes"),
 		middleware.SetHeader("Server", "rclone/"+fs.Version),
 	)
-	router.Get("/favicon.ico", s.serveFavicon)
 	router.Get("/*", s.handler)
 	router.Head("/*", s.handler)
 
@@ -231,27 +225,6 @@ func (s *HTTP) Shutdown() error {
 	return s.server.Shutdown()
 }
 
-// serveFavicon serves the remote's favicon.ico if it exists, otherwise
-// the rclone favicon
-func (s *HTTP) serveFavicon(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	VFS, err := s.getVFS(ctx)
-	if err == nil {
-		node, err := VFS.Stat("favicon.ico")
-		if err == nil && node.IsFile() {
-			// Remote has favicon.ico, serve it as a regular file
-			s.serveFile(w, r, "favicon.ico")
-			return
-		}
-	}
-	// Serve the embedded rclone favicon
-	w.Header().Set("Content-Type", "image/png")
-	w.Header().Set("Cache-Control", "max-age=86400")
-	if _, err := w.Write(faviconData); err != nil {
-		fs.Debugf(nil, "Failed to write favicon: %v", err)
-	}
-}
-
 // handler reads incoming requests and dispatches them
 func (s *HTTP) handler(w http.ResponseWriter, r *http.Request) {
 	isDir := strings.HasSuffix(r.URL.Path, "/")
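The removed handler paired with the //go:embed favicon.png directive deleted above: the asset is compiled into the binary and used as a fallback when the remote has no favicon of its own. The embedding pattern in isolation (a sketch; it assumes a favicon.png next to the source file at build time):

package main

import (
	_ "embed"
	"net/http"
)

//go:embed favicon.png
var faviconData []byte

func main() {
	http.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {
		// Serve the embedded image with a day of client-side caching.
		w.Header().Set("Content-Type", "image/png")
		w.Header().Set("Cache-Control", "max-age=86400")
		_, _ = w.Write(faviconData)
	})
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}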
@@ -1,7 +1,6 @@
 package http
 
 import (
-	"compress/gzip"
 	"context"
 	"flag"
 	"io"
@@ -298,116 +297,6 @@ func TestAuthProxy(t *testing.T) {
 	testGET(t, true)
 }
 
-func TestFavicon(t *testing.T) {
-	ctx := context.Background()
-
-	doGet := func(testURL, path string) *http.Response {
-		req, err := http.NewRequest("GET", testURL+path, nil)
-		require.NoError(t, err)
-		req.SetBasicAuth(testUser, testPass)
-		resp, err := http.DefaultClient.Do(req)
-		require.NoError(t, err)
-		return resp
-	}
-
-	t.Run("fallback", func(t *testing.T) {
-		// testdata/files has no favicon.ico, so the embedded fallback is served
-		f, err := fs.NewFs(ctx, "testdata/files")
-		require.NoError(t, err)
-		s, testURL := start(ctx, t, f)
-		defer func() { assert.NoError(t, s.server.Shutdown()) }()
-
-		resp := doGet(testURL, "favicon.ico")
-		defer func() { _ = resp.Body.Close() }()
-		assert.Equal(t, http.StatusOK, resp.StatusCode)
-		assert.Equal(t, "image/png", resp.Header.Get("Content-Type"))
-		assert.Equal(t, "max-age=86400", resp.Header.Get("Cache-Control"))
-		body, err := io.ReadAll(resp.Body)
-		require.NoError(t, err)
-		assert.Equal(t, faviconData, body)
-	})
-
-	t.Run("remote override", func(t *testing.T) {
-		// Start a server on a temp dir that already contains a custom favicon.ico,
-		// so the VFS sees it at init time and serves it instead of the fallback.
-		dir := t.TempDir()
-		customFavicon := []byte("custom favicon data")
-		require.NoError(t, os.WriteFile(filepath.Join(dir, "favicon.ico"), customFavicon, 0666))
-
-		f, err := fs.NewFs(ctx, dir)
-		require.NoError(t, err)
-		s, testURL := start(ctx, t, f)
-		defer func() { assert.NoError(t, s.server.Shutdown()) }()
-
-		resp := doGet(testURL, "favicon.ico")
-		defer func() { _ = resp.Body.Close() }()
-		assert.Equal(t, http.StatusOK, resp.StatusCode)
-		body, err := io.ReadAll(resp.Body)
-		require.NoError(t, err)
-		assert.Equal(t, customFavicon, body)
-	})
-}
-
-func TestCompressedDirectoryListing(t *testing.T) {
-	ctx := context.Background()
-	require.NoError(t, setAllModTimes("testdata/files", expectedTime))
-	f, err := fs.NewFs(ctx, "testdata/files")
-	require.NoError(t, err)
-
-	s, testURL := start(ctx, t, f)
-	defer func() { assert.NoError(t, s.server.Shutdown()) }()
-
-	req, err := http.NewRequest("GET", testURL, nil)
-	require.NoError(t, err)
-	req.SetBasicAuth(testUser, testPass)
-	req.Header.Set("Accept-Encoding", "gzip")
-
-	resp, err := http.DefaultClient.Do(req)
-	require.NoError(t, err)
-	defer func() { _ = resp.Body.Close() }()
-
-	assert.Equal(t, http.StatusOK, resp.StatusCode)
-	assert.Equal(t, "gzip", resp.Header.Get("Content-Encoding"))
-
-	gr, err := gzip.NewReader(resp.Body)
-	require.NoError(t, err)
-	defer func() { _ = gr.Close() }()
-
-	body, err := io.ReadAll(gr)
-	require.NoError(t, err)
-	assert.Contains(t, string(body), "Directory listing of /")
-}
-
-func TestCompressedTextFile(t *testing.T) {
-	ctx := context.Background()
-	require.NoError(t, setAllModTimes("testdata/files", expectedTime))
-	f, err := fs.NewFs(ctx, "testdata/files")
-	require.NoError(t, err)
-
-	s, testURL := start(ctx, t, f)
-	defer func() { assert.NoError(t, s.server.Shutdown()) }()
-
-	req, err := http.NewRequest("GET", testURL+"two.txt", nil)
-	require.NoError(t, err)
-	req.SetBasicAuth(testUser, testPass)
-	req.Header.Set("Accept-Encoding", "gzip")
-
-	resp, err := http.DefaultClient.Do(req)
-	require.NoError(t, err)
-	defer func() { _ = resp.Body.Close() }()
-
-	assert.Equal(t, http.StatusOK, resp.StatusCode)
-	assert.Equal(t, "gzip", resp.Header.Get("Content-Encoding"))
-
-	gr, err := gzip.NewReader(resp.Body)
-	require.NoError(t, err)
-	defer func() { _ = gr.Close() }()
-
-	body, err := io.ReadAll(gr)
-	require.NoError(t, err)
-	assert.Equal(t, "0123456789\n", string(body))
-}
-
 func TestRc(t *testing.T) {
 	servetest.TestRc(t, rc.Params{
 		"type": "http",
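The removed tests exercised transparent gzip compression end to end. The client side of such a check is compact; a self-contained sketch against a toy handler (not rclone's server):

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// A real server would negotiate via r.Header.Get("Accept-Encoding").
		w.Header().Set("Content-Encoding", "gzip")
		gw := gzip.NewWriter(w)
		defer gw.Close()
		_, _ = io.WriteString(gw, "0123456789\n")
	}))
	defer ts.Close()

	req, _ := http.NewRequest("GET", ts.URL, nil)
	// Setting Accept-Encoding ourselves disables the transport's
	// automatic decompression, so we must gunzip the body by hand.
	req.Header.Set("Accept-Encoding", "gzip")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	gr, err := gzip.NewReader(resp.Body)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(gr)
	fmt.Printf("%q\n", body) // "0123456789\n"
}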
@@ -66,9 +66,11 @@ func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
 func testCacheThrashDifferent(t *testing.T, h *Handler, c Cache) {
 	var wg sync.WaitGroup
 	for i := range 100 {
-		wg.Go(func() {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
 			testCacheCRUD(t, h, c, fmt.Sprintf("file-%d", i))
-		})
+		}()
 	}
 	wg.Wait()
 }
@@ -77,7 +79,9 @@ func testCacheThrashDifferent(t *testing.T, h *Handler, c Cache) {
 func testCacheThrashSame(t *testing.T, h *Handler, c Cache) {
 	var wg sync.WaitGroup
 	for range 100 {
-		wg.Go(func() {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
 
 			// Write a handle
 			splitPath := []string{"file"}
@@ -104,7 +108,7 @@ func testCacheThrashSame(t *testing.T, h *Handler, c Cache) {
 				require.Error(t, err)
 				assert.Equal(t, errStaleHandle, err)
 			}
-		})
+		}()
 	}
 	wg.Wait()
 }
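Worth noting about the rewritten goroutine in testCacheThrashDifferent: the closure captures the loop variable i, which is only safe because Go 1.22 gave each loop iteration its own variable. A minimal sketch of that semantics:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := range 3 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Since Go 1.22 each iteration has a fresh i, so this capture
			// prints 0, 1, 2 (in some order); before 1.22 it needed i := i.
			fmt.Println(i)
		}()
	}
	wg.Wait()
}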
@@ -291,7 +291,7 @@ func (c *conn) handleChannel(newChannel ssh.NewChannel) {
 			}
 		}
 		fs.Debugf(c.what, " - accepted: %v\n", ok)
-		err := req.Reply(ok, reply)
+		err = req.Reply(ok, reply)
 		if err != nil {
 			fs.Errorf(c.what, "Failed to Reply to request: %v", err)
 			return
Some files were not shown because too many files have changed in this diff.