mirror of https://github.com/rclone/rclone.git synced 2026-01-10 04:23:28 +00:00

Compare commits


1 commit

Author SHA1 Message Date
Nick Craig-Wood
1dc58a924c local: add debugging for link problem
See: https://forum.rclone.org/t/problem-with-symlinks-and-links/23840/13
2021-04-29 14:38:39 +01:00
259 changed files with 8286 additions and 23637 deletions

View File

@@ -267,15 +267,6 @@ jobs:
run: |
make
- name: install gomobile
run: |
go get golang.org/x/mobile/cmd/gobind
go get golang.org/x/mobile/cmd/gomobile
env PATH=$PATH:~/go/bin gomobile init
- name: arm-v7a gomobile build
run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm-v7a Set environment variables
shell: bash
run: |

View File

@@ -32,40 +32,3 @@ jobs:
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
build_docker_volume_plugin:
if: github.repository == 'rclone/rclone'
needs: build
runs-on: ubuntu-latest
name: Build and publish docker volume plugin
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set plugin parameters
shell: bash
run: |
GITHUB_REF=${{ github.ref }}
PLUGIN_IMAGE_USER=rclone
PLUGIN_IMAGE_NAME=docker-volume-rclone
PLUGIN_IMAGE_TAG=${GITHUB_REF#refs/tags/}
PLUGIN_IMAGE=${PLUGIN_IMAGE_USER}/${PLUGIN_IMAGE_NAME}:${PLUGIN_IMAGE_TAG}
PLUGIN_IMAGE_LATEST=${PLUGIN_IMAGE_USER}/${PLUGIN_IMAGE_NAME}:latest
echo "PLUGIN_IMAGE_USER=${PLUGIN_IMAGE_USER}" >> $GITHUB_ENV
echo "PLUGIN_IMAGE_NAME=${PLUGIN_IMAGE_NAME}" >> $GITHUB_ENV
echo "PLUGIN_IMAGE_TAG=${PLUGIN_IMAGE_TAG}" >> $GITHUB_ENV
echo "PLUGIN_IMAGE=${PLUGIN_IMAGE}" >> $GITHUB_ENV
echo "PLUGIN_IMAGE_LATEST=${PLUGIN_IMAGE_LATEST}" >> $GITHUB_ENV
- name: Build image
shell: bash
run: |
make docker-plugin
- name: Push image
shell: bash
run: |
docker login -u ${{ secrets.DOCKER_HUB_USER }} -p ${{ secrets.DOCKER_HUB_PASSWORD }}
make docker-plugin-push PLUGIN_IMAGE=${PLUGIN_IMAGE}
make docker-plugin-push PLUGIN_IMAGE=${PLUGIN_IMAGE_LATEST}

3
.gitignore vendored
View File

@@ -11,6 +11,3 @@ rclone.iml
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db

View File

@@ -12,162 +12,95 @@ When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):
* Rclone version (e.g. output from `rclone version`)
* Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
* Rclone version (e.g. output from `rclone -V`)
* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a new feature or bug fix ##
## Submitting a pull request ##
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
If it is a big feature then make an issue first so it can be discussed.
To prepare your pull request first press the fork button on [rclone's GitHub
You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.
First in your web browser press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).
Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
Now in your terminal
git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
# if you have SSH keys setup in your GitHub account:
git remote add origin git@github.com:YOURUSER/rclone.git
# otherwise:
git remote add origin https://github.com/YOURUSER/rclone.git
go build
Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
Now [install Go](https://golang.org/doc/install) and verify your installation:
go version
Great, you can now compile and execute your own version of rclone:
go build
./rclone version
Finally make a branch to add your new feature
Make a branch to add your new feature
git checkout -b my-new-feature
And get hacking.
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
When ready - run the unit tests for the code you changed
When ready - test the affected functionality and run the unit tests for the code you changed
cd folder/with/changed/files
go test -v
Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.
This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
Note the top level Makefile targets
* make check
* make test
Both of these will be run by Travis when you make a pull request but
you can do this yourself locally too. These require some extra go
packages which you can install with
* make build_dep
Make sure you
* Add [unit tests](#testing) for a new feature.
* Add [documentation](#writing-documentation) for a new feature.
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
* Follow the [commit message guidelines](#commit-messages).
* Add [unit tests](#testing) for a new feature
* squash commits down to one per feature
* rebase to master with `git rebase master`
When you are done with that push your changes to Github:
When you are done with that
git push -u origin my-new-feature
and open the GitHub website to [create your pull
Go to the GitHub website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).
Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
Your patch will get reviewed and you might get asked to fix some stuff.
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
```
git log # See how many commits you want to squash
git reset --soft HEAD~2 # This squashes the 2 latest commits together.
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
git commit # Add a new commit message.
git push --force # Push the squashed commit to your GitHub repo.
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
```
## Using Git and Github ##
### Committing your changes ###
Follow the guideline for [commit messages](#commit-messages) and then:
git checkout my-new-feature # To switch to your branch
git status # To see the new and changed files
git add FILENAME # To select FILENAME for the commit
git status # To verify the changes to be committed
git commit # To do the commit
git log # To verify the commit. Use q to quit the log
You can modify the message or changes in the latest commit using:
git commit --amend
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits ###
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
Your previously pushed commits are replaced by:
git push --force origin my-new-feature
### Basing your changes on the latest master ###
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
git checkout master
git fetch upstream
git merge --ff-only
git push origin --follow-tags # optional update of your fork in GitHub
git checkout my-new-feature
git rebase master
If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Squashing your commits ###
To combine your commits into one commit:
git log # To count the commits to squash, e.g. the last 2
git reset --soft HEAD~2 # To undo the 2 latest commits
git status # To check everything is as expected
If everything is fine, then make the new combined commit:
git commit # To commit the undone commits as one
otherwise, you may roll back using:
git reflog # To check that HEAD{1} is your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
### GitHub Continuous Integration ###
## CI for your fork ##
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
## Testing ##
### Quick testing ###
rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
go test -v ./...
You can also use `make`, if supported by your platform
make quicktest
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
### Backend testing ###
rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
@@ -201,19 +134,12 @@ project root:
go install github.com/rclone/rclone/fstest/test_all
test_all -backend drive
### Full integration testing ###
If you want to run all the integration tests against all the remotes,
then change into the project root and run
make check
make test
The commands may require some extra go packages which you can install with
make build_dep
The full integration tests are run daily on the integration test server. You can
This command is run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/
## Code Organisation ##
@@ -228,7 +154,6 @@ with modules beneath.
* cmd - the rclone commands
* all - import this to load all the commands
* ...commands
* cmdtest - end-to-end tests of commands, flags, environment variables,...
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* command - these are auto generated - edit the corresponding .go file

2377
MANUAL.html generated

File diff suppressed because it is too large

3001
MANUAL.md generated

File diff suppressed because it is too large

3139
MANUAL.txt generated

File diff suppressed because it is too large

View File

@@ -256,36 +256,3 @@ startstable:
winzip:
zip -9 rclone-$(TAG).zip rclone.exe
# docker volume plugin
PLUGIN_IMAGE_USER ?= rclone
PLUGIN_IMAGE_TAG ?= latest
PLUGIN_IMAGE_NAME ?= docker-volume-rclone
PLUGIN_IMAGE ?= $(PLUGIN_IMAGE_USER)/$(PLUGIN_IMAGE_NAME):$(PLUGIN_IMAGE_TAG)
PLUGIN_BASE_IMAGE := rclone/rclone:latest
PLUGIN_BUILD_DIR := ./build/docker-plugin
PLUGIN_CONTRIB_DIR := ./cmd/serve/docker/contrib/plugin
PLUGIN_CONFIG := $(PLUGIN_CONTRIB_DIR)/config.json
PLUGIN_DOCKERFILE := $(PLUGIN_CONTRIB_DIR)/Dockerfile
PLUGIN_CONTAINER := docker-volume-rclone-dev-$(shell date +'%Y%m%d-%H%M%S')
docker-plugin: docker-plugin-rootfs docker-plugin-create
docker-plugin-image: rclone
docker build --no-cache --pull --build-arg BASE_IMAGE=${PLUGIN_BASE_IMAGE} -t ${PLUGIN_IMAGE} -f ${PLUGIN_DOCKERFILE} .
docker-plugin-rootfs: docker-plugin-image
mkdir -p ${PLUGIN_BUILD_DIR}/rootfs
docker create --name ${PLUGIN_CONTAINER} ${PLUGIN_IMAGE}
docker export ${PLUGIN_CONTAINER} | tar -x -C ${PLUGIN_BUILD_DIR}/rootfs
docker rm -vf ${PLUGIN_CONTAINER}
cp ${PLUGIN_CONFIG} ${PLUGIN_BUILD_DIR}/config.json
docker-plugin-create:
docker plugin rm -f ${PLUGIN_IMAGE} 2>/dev/null || true
docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}
docker-plugin-push: docker-plugin-create
docker plugin push ${PLUGIN_IMAGE}
docker plugin rm ${PLUGIN_IMAGE}

View File

@@ -62,7 +62,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
@@ -88,6 +87,7 @@ Please see [the full list of all storage providers and their features](https://r
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna

View File

@@ -1 +1 @@
v1.57.0
v1.56.0

View File

@@ -69,10 +69,12 @@ func init() {
Prefix: "acd",
Description: "Amazon Drive",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: acdConfig,
})
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "checkpoint",
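This hunk shows the two `Config` callback shapes that recur throughout the backend diffs below: a state-machine callback returning `(*fs.ConfigOut, error)` that rclone re-enters with the user's answer in `fs.ConfigIn`, and the older blocking callback returning plain `error` after running the whole interactive flow itself. A shape-only sketch, using hypothetical stand-in types rather than the real `fs`/`configmap` definitions:

```
package main

import (
	"context"
	"fmt"
)

// Hypothetical stand-ins for the rclone types used in this diff
// (configmap.Mapper, fs.ConfigIn, fs.ConfigOut); shapes only.
type Mapper map[string]string
type ConfigIn struct{ State, Result string }
type ConfigOut struct{ State string } // next state to run

// Newer style: a state machine. rclone calls the function repeatedly,
// feeding the user's answer back in ConfigIn, until it returns nil.
func configStateMachine(ctx context.Context, name string, m Mapper, in ConfigIn) (*ConfigOut, error) {
	switch in.State {
	case "":
		return &ConfigOut{State: "token"}, nil // e.g. hand off to the oauth flow
	case "token":
		return nil, nil // done
	}
	return nil, fmt.Errorf("unknown state %q", in.State)
}

// Older style: one blocking call that runs the whole interactive flow
// (prompts, oauth dance) before returning.
func configBlocking(ctx context.Context, name string, m Mapper) error {
	// e.g. oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
	return nil
}

func main() {
	out, err := configStateMachine(context.Background(), "remote", Mapper{}, ConfigIn{})
	fmt.Println(out, err, configBlocking(context.Background(), "remote", nil))
}
```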

View File

@@ -80,12 +80,13 @@ func init() {
Leave blank normally. Needed only if you want to use a service principal instead of interactive login.
$ az ad sp create-for-rbac --name "<name>" \
$ az sp create-for-rbac --name "<name>" \
--role "Storage Blob Data Owner" \
--scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
> azure-principal.json
See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.
See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli)
for more details.
`,
}, {
Name: "key",

View File

@@ -83,7 +83,7 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
@@ -92,15 +92,16 @@ func init() {
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
return errors.Wrap(err, "failed to configure token with jwt authentication")
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token with oauth authentication")
}
}
return nil, nil
return nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",

View File

@@ -339,14 +339,8 @@ func parseRootPath(path string) (string, error) {
return strings.Trim(path, "/"), nil
}
var warnDeprecated sync.Once
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
warnDeprecated.Do(func() {
fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
})
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)

View File

@@ -182,71 +182,32 @@ func init() {
Description: "Google Drive",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, errors.Wrap(err, "couldn't parse config into struct")
return errors.Wrap(err, "couldn't parse config into struct")
}
switch config.State {
case "":
// Fill in the scopes
driveConfig.Scopes = driveScopes(opt.Scope)
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
OAuth2Config: driveConfig,
})
}
return fs.ConfigGoto("teamdrive")
case "teamdrive":
if opt.TeamDriveID == "" {
return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", "Configure this as a Shared Drive (Team Drive)?\n")
}
return fs.ConfigConfirm("teamdrive_change", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID))
case "teamdrive_ok":
if config.Result == "false" {
m.Set("team_drive", "")
return nil, nil
}
return fs.ConfigGoto("teamdrive_config")
case "teamdrive_change":
if config.Result == "false" {
return nil, nil
}
return fs.ConfigGoto("teamdrive_config")
case "teamdrive_config":
f, err := newFs(ctx, name, "", m)
if err != nil {
return nil, errors.Wrap(err, "failed to make Fs to list Shared Drives")
}
teamDrives, err := f.listTeamDrives(ctx)
if err != nil {
return nil, err
}
if len(teamDrives) == 0 {
return fs.ConfigError("", "No Shared Drives found in your account")
}
return fs.ConfigChoose("teamdrive_final", "config_team_drive", "Shared Drive", len(teamDrives), func(i int) (string, string) {
teamDrive := teamDrives[i]
return teamDrive.Id, teamDrive.Name
})
case "teamdrive_final":
driveID := config.Result
m.Set("team_drive", driveID)
m.Set("root_folder_id", "")
opt.TeamDriveID = driveID
opt.RootFolderID = ""
return nil, nil
// Fill in the scopes
driveConfig.Scopes = driveScopes(opt.Scope)
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}
return nil, fmt.Errorf("unknown state %q", config.State)
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
err = oauthutil.Config(ctx, "drive", name, m, driveConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
}
err = configTeamDrive(ctx, opt, m, name)
if err != nil {
return errors.Wrap(err, "failed to configure Shared Drive")
}
return nil
},
Options: append(driveOAuthOptions(), []fs.Option{{
Name: "scope",
@@ -987,6 +948,48 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
return
}
// Figure out if the user wants to use a team drive
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
ci := fs.GetConfig(ctx)
// Stop if we are running non-interactive config
if ci.AutoConfirm {
return nil
}
if opt.TeamDriveID == "" {
fmt.Printf("Configure this as a Shared Drive (Team Drive)?\n")
} else {
fmt.Printf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID)
}
if !config.Confirm(false) {
return nil
}
f, err := newFs(ctx, name, "", m)
if err != nil {
return errors.Wrap(err, "failed to make Fs to list Shared Drives")
}
fmt.Printf("Fetching Shared Drive list...\n")
teamDrives, err := f.listTeamDrives(ctx)
if err != nil {
return err
}
if len(teamDrives) == 0 {
fmt.Printf("No Shared Drives found in your account")
return nil
}
var driveIDs, driveNames []string
for _, teamDrive := range teamDrives {
driveIDs = append(driveIDs, teamDrive.Id)
driveNames = append(driveNames, teamDrive.Name)
}
driveID := config.Choose("Enter a Shared Drive ID", driveIDs, driveNames, true)
m.Set("team_drive", driveID)
m.Set("root_folder_id", "")
opt.TeamDriveID = driveID
opt.RootFolderID = ""
return nil
}
// getClient makes an http client according to the options
func getClient(ctx context.Context, opt *Options) *http.Client {
t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
@@ -1165,7 +1168,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
}
}
f.rootFolderID = rootID
fs.Debugf(f, "'root_folder_id = %s' - save this in the config to speed up startup", rootID)
fs.Debugf(f, "root_folder_id = %q - save this in the config to speed up startup", rootID)
}
f.dirCache = dircache.New(f.root, f.rootFolderID, f)
@@ -1328,8 +1331,8 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
// If item has MD5 sum it is a file stored on drive
if info.Md5Checksum != "" {
// If item has MD5 sum or a length it is a file stored on drive
if info.Md5Checksum != "" || info.Size > 0 {
return f.newRegularObject(remote, info), nil
}
@@ -1362,8 +1365,8 @@ func (f *Fs) newObjectWithExportInfo(
// Pretend a dangling shortcut is a regular object
// It will error if used, but appear in listings so it can be deleted
return f.newRegularObject(remote, info), nil
case info.Md5Checksum != "":
// If item has MD5 sum it is a file stored on drive
case info.Md5Checksum != "" || info.Size > 0:
// If item has MD5 sum or a length it is a file stored on drive
return f.newRegularObject(remote, info), nil
case f.opt.SkipGdocs:
fs.Debugf(remote, "Skipping google document type %q", info.MimeType)

View File

@@ -1,350 +0,0 @@
// This file contains the implementation of the sync batcher for uploads
//
// Dropbox rules say you can start as many batches as you want, but
// you may only have one batch being committed and must wait for the
// batch to be finished before committing another.
package dropbox
import (
"context"
"fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)
const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (async)
defaultBatchSizeAsync = 100 // default batch size if async
)
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}
// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}
// Send this to get the engine to quit
var quitRequest = batcherRequest{}
// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}
// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, errors.Errorf("dropbox: batch size must be <= %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, errors.Wrap(err, "batch commit failed")
}
return batchStatus, nil
}
// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxTries = 120
for try := 1; try <= maxTries; try++ {
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d/%d", sleepTime, err, try, maxTries)
} else {
if batchStatus.Tag == "complete" {
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d/%d", sleepTime, batchStatus.Tag, try, maxTries)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > time.Second {
sleepTime = time.Second
}
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, errors.Wrapf(err, "wait for batch failed after %d tries", maxTries)
}
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll
batchStatus, err := b.finishBatch(ctx, items)
if err != nil {
return err
}
// check whether batch is complete
var complete *files.UploadSessionFinishBatchResult
switch batchStatus.Tag {
case "async_job_id":
// wait for batch to complete
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
if err != nil {
return err
}
case "complete":
complete = batchStatus.Complete
default:
return errors.Errorf("batch returned unknown status %q", batchStatus.Tag)
}
// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}
// Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
resp.entry = item.Success
} else {
errorCount++
errorTag = item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
errorTag += "/" + item.Failure.LookupFailed.Tag
}
if item.Failure.Path != nil {
errorTag += "/" + item.Failure.Path.Tag
}
if item.Failure.PropertiesError != nil {
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
resp.err = errors.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Show signalled so no need to report error to clients from now on
signalled = true
// Report an error if any failed in the batch
if errorTag != "" {
return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil
}
// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}
}
// commit any remaining items
if len(items) > 0 {
commit()
}
}
// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}
// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}
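The deleted batcher above is about 350 lines; the core of its `commitLoop` is a small, reusable pattern: accumulate requests from a channel and flush when the batch is full or has sat idle past a timeout. A minimal sketch of just that pattern, with plain strings standing in for the `files.UploadSessionFinishArg` items:

```
package main

import (
	"fmt"
	"time"
)

// Idle-timeout batching, as in commitLoop above: collect items from a
// channel, flush when the batch reaches maxSize or when no item has
// arrived for idleTimeout.
func batchLoop(in <-chan string, maxSize int, idleTimeout time.Duration, commit func([]string)) {
	var batch []string
	timer := time.NewTimer(idleTimeout)
	timer.Stop() // nothing pending yet
	defer timer.Stop()

	flush := func() {
		if len(batch) > 0 {
			commit(batch)
			batch = nil
		}
	}

	for {
		select {
		case item, ok := <-in:
			if !ok { // channel closed: flush what's left and exit
				flush()
				return
			}
			batch = append(batch, item)
			timer.Stop()
			if len(batch) >= maxSize {
				flush()
			} else {
				timer.Reset(idleTimeout)
			}
		case <-timer.C:
			flush() // batch went idle
		}
	}
}

func main() {
	in := make(chan string)
	done := make(chan struct{})
	go func() {
		batchLoop(in, 3, 100*time.Millisecond, func(b []string) { fmt.Println("commit:", b) })
		close(done)
	}()
	for _, s := range []string{"a", "b", "c", "d"} {
		in <- s
	}
	close(in)
	<-done
}
```

The real batcher adds per-item reply channels, a quit request instead of closing `in` (so a late `Commit` does not write to a closed channel), and an atexit hook, but the select loop is the same shape.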

View File

@@ -138,19 +138,23 @@ func getOauthConfig(m configmap.Mapper) *oauth2.Config {
// Register with Fs
func init() {
DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New)
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: getOauthConfig(m),
NoOffline: true,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
opt := oauthutil.Options{
NoOffline: true,
OAuth2Opts: []oauth2.AuthCodeOption{
oauth2.SetAuthURLParam("token_access_type", "offline"),
},
})
}
err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size",
@@ -209,63 +213,6 @@ Note that we don't unmount the shared folder afterwards so the
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: "batch_mode",
Help: `Upload file batching sync|async|off.
This sets the batch mode used by rclone.
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
This has 3 possible values
- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
`,
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.
This sets the batch size of files to upload. It has to be less than 1000.
By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.
- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading
If an upload batch is idle for more than this long then it will be
uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -289,10 +236,6 @@ type Options struct {
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -312,7 +255,6 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher // batch builder
}
// Object describes a dropbox object
@@ -328,6 +270,8 @@ type Object struct {
hash string // content_hash of the object
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
@@ -438,10 +382,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
return nil, err
}
cfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
@@ -1441,13 +1381,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(DbHashType)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
f.batcher.Shutdown()
return nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -1607,83 +1540,97 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// uploadChunked uploads the object in parts
//
// Will introduce two additional network requests to start and finish the session.
// If the size is unknown (i.e. -1) the method incurs one additional
// request to the Dropbox API that does not carry a payload to close the append session.
// Will work optimally if size is >= uploadChunkSize. If the size is either
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
// start upload
chunkSize := int64(o.fs.opt.ChunkSize)
chunks := 0
if size != -1 {
chunks = int(size/chunkSize) + 1
}
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
fmtChunk := func(cur int, last bool) {
if chunks == 0 && last {
fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
} else if chunks == 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", cur)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
}
}
// write the first chunk
fmtChunk(1, false)
var res *files.UploadSessionStartResult
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil)
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
chunkSize := int64(o.fs.opt.ChunkSize)
chunks, remainder := size/chunkSize, size%chunkSize
if remainder > 0 {
chunks++
}
// write chunks
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
cursor := files.UploadSessionCursor{
SessionId: res.SessionId,
Offset: 0,
}
appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
for currentChunk := 1; ; currentChunk++ {
cursor.Offset = in.BytesRead()
appendArg := files.UploadSessionAppendArg{
Cursor: &cursor,
Close: false,
}
if chunks < 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", currentChunk)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", currentChunk, chunks)
// write more whole chunks (if any)
currentChunk := 2
for {
if chunks > 0 && currentChunk >= chunks {
// if the size is known, only upload full chunks. Remaining bytes are uploaded with
// the UploadSessionFinish request.
break
} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
// if the size is unknown, upload as long as we can read full chunks from the reader.
// The UploadSessionFinish request will not contain any payload.
break
}
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
cursor.Offset = in.BytesRead()
fmtChunk(currentChunk, false)
chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
// after session is started, we retry everything
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, err
}
if appendArg.Close {
break
}
if size > 0 {
// if size is known, check if next chunk is final
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
} else {
// if size is unknown, upload as long as we can read full chunks from the reader
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
}
currentChunk++
}
// finish upload
// write the remains
cursor.Offset = in.BytesRead()
args := &files.UploadSessionFinishArg{
Cursor: &cursor,
Commit: commitInfo,
}
// If we are batching then we should have written all the data now
// store the commit info now for a batch commit
if o.fs.batcher.Batching() {
return o.fs.batcher.Commit(ctx, args)
}
fmtChunk(currentChunk, true)
chunk = readers.NewRepeatableReaderBuffer(in, buf)
err = o.fs.pacer.Call(func() (bool, error) {
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -1750,7 +1697,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var err error
var entry *files.FileMetadata
if size > int64(o.fs.opt.ChunkSize) || size < 0 || o.fs.batcher.Batching() {
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1761,15 +1708,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return errors.Wrap(err, "upload failed")
}
// If we haven't received data back from batch upload then fake it
//
// This will only happen if we are uploading async batches
if entry == nil {
o.bytes = size
o.modTime = commitInfo.ClientModified
o.hash = "" // we don't have this
return nil
}
return o.setMetadataFromEntry(entry)
}
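Both versions of `uploadChunked` interleaved above follow the same Dropbox session lifecycle: `UploadSessionStart`, zero or more `UploadSessionAppendV2` calls, then `UploadSessionFinish` carrying the commit info. They differ mainly in chunk accounting. A self-contained sketch of the remainder-based arithmetic used on one side of the hunk (function name hypothetical):

```
package main

import "fmt"

// countChunks mirrors the chunk accounting in uploadChunked above: for a
// known size, the number of full chunks plus one more for any remainder;
// -1 signals an unknown size (stream until the reader runs dry).
func countChunks(size, chunkSize int64) int64 {
	if size < 0 {
		return -1
	}
	chunks := size / chunkSize
	if size%chunkSize > 0 {
		chunks++
	}
	return chunks
}

func main() {
	for _, size := range []int64{0, 1, 100, 128, 129, -1} {
		fmt.Printf("size=%d chunkSize=128 -> chunks=%d\n", size, countChunks(size, 128))
	}
}
```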
@@ -1797,7 +1735,6 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = &Fs{}
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
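The `var` block above, including the `_ fs.Shutdowner = &Fs{}` line being removed, uses the standard Go idiom for compile-time interface checks: assigning a typed nil (or zero value) to a blank variable of the interface type fails to compile if the type stops satisfying the interface. A tiny standalone illustration:

```
package main

import "fmt"

type Shutdowner interface{ Shutdown() error }

type Fs struct{}

func (f *Fs) Shutdown() error { return nil }

// Compile-time check: if *Fs ever loses the Shutdown method, this
// declaration stops compiling, which is the point of the var block above.
var _ Shutdowner = (*Fs)(nil)

func main() { fmt.Println("ok") }
```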

View File

@@ -4,7 +4,6 @@ import (
"context"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
@@ -87,16 +86,10 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
return &file, err
}
// maybe do some actual validation later if necessary
func validToken(token *GetTokenResponse) bool {
return token.Status == "OK"
}
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
Pass: f.opt.FilePassword,
}
opts := rest.Opts{
Method: "POST",
@@ -106,8 +99,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
doretry, err := shouldRetry(ctx, resp, err)
return doretry || !validToken(&token), err
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -126,16 +118,10 @@ func fileFromSharedFile(file *SharedFile) File {
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
ContentType: "application/x-www-form-urlencoded",
}
if f.opt.FolderPassword != "" {
opts.Method = "POST"
opts.Parameters = nil
opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
}
var sharedFiles SharedFolderResponse
@@ -325,7 +311,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
return nil, errors.Errorf("can't remove folder: %s", response.Message)
return nil, errors.New("Can't remove non-empty dir")
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -410,34 +396,6 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
return response, nil
}
func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
request := &RenameFileRequest{
URLs: []RenameFileURL{
{
URL: url,
Filename: newName,
},
},
}
opts := rest.Opts{
Method: "POST",
Path: "/file/rename.cgi",
}
response = &RenameFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't rename file")
}
return response, nil
}
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")

View File

@@ -44,18 +44,6 @@ func init() {
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Help: "If you want to download a shared file that is password protected, add this parameter",
Name: "file_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter",
Name: "folder_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -87,11 +75,9 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"`
Enc encoder.MultiEncoder `config:"encoding"`
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs is the interface a cloud storage system must provide
@@ -437,45 +423,25 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}
// Find current directory ID
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
return nil, err
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
// If it is in the correct directory, just rename it
var url string
if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't rename file")
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't rename file: %s", resp.Message)
}
url = resp.URLs[0].URL
} else {
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
}
url = resp.URLs[0]
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
}
file, err := f.readFileInfo(ctx, url)
file, err := f.readFileInfo(ctx, resp.URLs[0])
if err != nil {
return nil, errors.New("couldn't read file data")
}
@@ -506,7 +472,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
return nil, errors.New("couldn't move file")
}
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)

View File

@@ -19,7 +19,6 @@ type ListFilesRequest struct {
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
Pass string `json:"pass,omitempty"`
}
// RemoveFolderRequest is the request structure of the corresponding request
@@ -64,9 +63,8 @@ type MoveFileRequest struct {
// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
URLs []string `json:"urls"`
Status string `json:"status"`
URLs []string `json:"urls"`
}
// CopyFileRequest is the request structure of the corresponding request
@@ -78,10 +76,9 @@ type CopyFileRequest struct {
// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
Status string `json:"status"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
}
// FileCopy is used in the CopyFileResponse
@@ -90,30 +87,6 @@ type FileCopy struct {
ToURL string `json:"to_url"`
}
// RenameFileURL is the data structure to rename a single file
type RenameFileURL struct {
URL string `json:"url"`
Filename string `json:"filename"`
}
// RenameFileRequest is the request structure of the corresponding request
type RenameFileRequest struct {
URLs []RenameFileURL `json:"urls"`
Pretty int `json:"pretty"`
}
// RenameFileResponse is the response structure of the corresponding request
type RenameFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Renamed int `json:"renamed"`
URLs []struct {
URL string `json:"url"`
OldFilename string `json:"old_filename"`
NewFilename string `json:"new_filename"`
} `json:"urls"`
}
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`

View File

@@ -5,7 +5,6 @@ package api
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strings"
@@ -52,23 +51,6 @@ func (t Time) String() string {
return time.Time(t).UTC().Format(timeFormatParameters)
}
// Int represents an integer which can be represented in JSON as a
// quoted integer or an integer.
type Int int
// MarshalJSON turns a Int into JSON
func (i *Int) MarshalJSON() (out []byte, err error) {
return json.Marshal((*int)(i))
}
// UnmarshalJSON turns JSON into a Int
func (i *Int) UnmarshalJSON(data []byte) error {
if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
data = data[1 : len(data)-1]
}
return json.Unmarshal(data, (*int)(i))
}
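The `api.Int` removed here and the `json:"from,string"` tag that replaces it on the `From` field below accept different inputs: `,string` requires the number to arrive quoted, while the custom type handles quoted and bare forms. A runnable comparison (the type mirrors the removed code; the sample data is made up):

```
package main

import (
	"encoding/json"
	"fmt"
)

// Int mirrors the removed api.Int: strip surrounding quotes if present,
// then unmarshal as a plain int, so both 5 and "5" decode.
type Int int

// UnmarshalJSON turns JSON into an Int
func (i *Int) UnmarshalJSON(data []byte) error {
	if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
		data = data[1 : len(data)-1]
	}
	return json.Unmarshal(data, (*int)(i))
}

type flexible struct {
	From Int `json:"from"`
}

type stringOnly struct {
	From int `json:"from,string"` // errors on an unquoted number
}

func main() {
	for _, in := range []string{`{"from": 5}`, `{"from": "5"}`} {
		var f flexible
		var s stringOnly
		fmt.Println(in, "-> Int:", json.Unmarshal([]byte(in), &f), "| ,string:", json.Unmarshal([]byte(in), &s))
	}
}
```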
// Status is returned in all status responses
type Status struct {
Code string `json:"status"`
@@ -133,7 +115,7 @@ type GetFolderContentsResponse struct {
Total int `json:"total,string"`
Items []Item `json:"filelist"`
Folder Item `json:"folder"`
From Int `json:"from"`
From int `json:"from,string"`
//Count int `json:"count"`
Pid string `json:"pid"`
RefreshResult Status `json:"refreshresult"`

View File

@@ -1050,16 +1050,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.Wrap(err, "Update")
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - send by some servers
if err != nil {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
}
}
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
remove()

View File

@@ -21,7 +21,6 @@ import (
"io/ioutil"
"net/http"
"path"
"strconv"
"strings"
"time"
@@ -51,10 +50,10 @@ import (
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
timeFormat = time.RFC3339Nano
metaMtime = "mtime" // key to store mtime in metadata
metaMtimeGsutil = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
listChunks = 1000 // chunk size to read directory listings
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 1000 // chunk size to read directory listings
minSleep = 10 * time.Millisecond
)
@@ -76,16 +75,18 @@ func init() {
Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
return nil, nil
return nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: storageConfig,
})
err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
@@ -921,7 +922,7 @@ func (o *Object) setMetaData(info *storage.Object) {
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
modTime, err := time.Parse(timeFormat, mtimeString)
modTime, err := time.Parse(timeFormatIn, mtimeString)
if err == nil {
o.modTime = modTime
return
@@ -929,19 +930,8 @@ func (o *Object) setMetaData(info *storage.Object) {
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
}
// Fallback to GSUtil mtime
mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
if ok {
unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
if err == nil {
o.modTime = time.Unix(unixTimeSec, 0)
return
}
fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
}
// Fallback to the Updated time
modTime, err := time.Parse(timeFormat, info.Updated)
modTime, err := time.Parse(timeFormatIn, info.Updated)
if err != nil {
fs.Logf(o, "Bad time decode: %v", err)
} else {
@@ -998,8 +988,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(timeFormat)
metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
metadata[metaMtime] = modTime.Format(timeFormatOut)
return metadata
}
@@ -1011,11 +1000,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
return err
}
// Add the mtime to the existing metadata
mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil {
object.Metadata = make(map[string]string, 1)
}
object.Metadata[metaMtime] = modTime.Format(timeFormat)
object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
object.Metadata[metaMtime] = mtime
// Copy the object to itself to update the metadata
// Using PATCH requires too many permissions
bucket, bucketPath := o.split()
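The metadata logic above reads an object's mtime with a fallback chain: rclone's `mtime` key (RFC3339), then, on one side of this hunk, gsutil's `goog-reserved-file-mtime` key (Unix seconds), then the server-side `Updated` timestamp. A compact sketch of that chain, with a plain map standing in for the GCS object metadata:

```
package main

import (
	"fmt"
	"strconv"
	"time"
)

// modTime applies the fallback chain from setMetaData above: rclone's
// RFC3339 "mtime", then gsutil's Unix-seconds "goog-reserved-file-mtime",
// then the server-side Updated timestamp.
func modTime(metadata map[string]string, updated string) time.Time {
	if s, ok := metadata["mtime"]; ok {
		if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
			return t
		}
	}
	if s, ok := metadata["goog-reserved-file-mtime"]; ok {
		if sec, err := strconv.ParseInt(s, 10, 64); err == nil {
			return time.Unix(sec, 0)
		}
	}
	t, _ := time.Parse(time.RFC3339Nano, updated) // last resort: Updated
	return t
}

func main() {
	// arbitrary sample values, for illustration only
	meta := map[string]string{"goog-reserved-file-mtime": "1619703519"}
	fmt.Println(modTime(meta, "2021-04-29T14:38:39.000000000Z"))
}
```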

View File

@@ -53,7 +53,6 @@ const (
minSleep = 10 * time.Millisecond
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
scopeAccess = 2 // position of access scope in list
)
var (
@@ -62,7 +61,7 @@ var (
Scopes: []string{
"openid",
"profile",
scopeReadWrite, // this must be at position scopeAccess
scopeReadWrite,
},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
@@ -78,36 +77,36 @@ func init() {
Prefix: "gphotos",
Description: "Google Photos",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, errors.Wrap(err, "couldn't parse config into struct")
return errors.Wrap(err, "couldn't parse config into struct")
}
switch config.State {
case "":
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
} else {
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
}
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
case "warning":
// Warn the user as required by google photos integration
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)
case "warning_done":
return nil, nil
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[0] = scopeReadOnly
} else {
oauthConfig.Scopes[0] = scopeReadWrite
}
return nil, fmt.Errorf("unknown state %q", config.State)
// Do the oauth
err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
// Warn the user
fmt.Print(`
*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.
`)
return nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "read_only",


@@ -37,7 +37,7 @@ func init() {
Help: `Kerberos service principal name for the namenode
Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode.`,
(<SERVICE>/<FQDN>) for the namenode.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "hdfs/namenode.hadoop.docker",


@@ -55,10 +55,12 @@ func init() {
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})


@@ -48,29 +48,37 @@ const (
rootURL = "https://jfs.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
configTokenURL = "tokenURL"
configClientID = "client_id"
configClientSecret = "client_secret"
configUsername = "username"
configVersion = 1
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
defaultClientID = "jottacli"
legacyTokenURL = "https://api.jottacloud.com/auth/v1/token"
legacyRegisterURL = "https://api.jottacloud.com/auth/v1/register"
legacyClientID = "nibfk8biu12ju7hpqomr8b1e40"
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
legacyConfigVersion = 0
v1tokenURL = "https://api.jottacloud.com/auth/v1/token"
v1registerURL = "https://api.jottacloud.com/auth/v1/register"
v1ClientID = "nibfk8biu12ju7hpqomr8b1e40"
v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
v1configVersion = 0
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop"
)
var (
// Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
},
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
// Register with Fs
func init() {
// needs to be done early so we can use oauth during config
@@ -78,7 +86,44 @@ func init() {
Name: "jottacloud",
Description: "Jottacloud",
NewFs: NewFs,
Config: Config,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
return errors.Wrap(err, "failed to parse config version - corrupted config")
}
refresh = (ver != configVersion) && (ver != v1configVersion)
}
if refresh {
fmt.Printf("Config outdated - refreshing\n")
} else {
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return nil
}
}
}
fmt.Printf("Choose authentication type:\n" +
"1: Standard authentication - use this if you're a normal Jottacloud user.\n" +
"2: Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n" +
"3: Telia Cloud authentication - use this if you are using Telia Cloud.\n")
switch config.ChooseNumber("Your choice", 1, 3) {
case 1:
return v2config(ctx, name, m)
case 2:
return v1config(ctx, name, m)
case 3:
return teliaCloudConfig(ctx, name, m)
default:
return errors.New("unknown config choice")
}
},
Options: []fs.Option{{
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
@@ -99,11 +144,6 @@ func init() {
Help: "Files bigger than this can be resumed if the upload fail's.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}, {
Name: "no_versions",
Help: "Avoid server side versioning by deleting files and recreating files instead of overwriting them.",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -118,183 +158,6 @@ func init() {
})
}
// Config runs the backend configuration protocol
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
switch config.State {
case "":
return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type`, []fs.OptionExample{{
Value: "standard",
Help: "Standard authentication - use this if you're a normal Jottacloud user.",
}, {
Value: "legacy",
Help: "Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, {
Value: "telia",
Help: "Telia Cloud authentication - use this if you are using Telia Cloud.",
}})
case "auth_type_done":
// Jump to next state according to config chosen
return fs.ConfigGoto(config.Result)
case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
m.Set("configVersion", fmt.Sprint(configVersion))
return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\n\nGenerate here: https://www.jottacloud.com/web/secure")
case "standard_token":
loginToken := config.Result
m.Set(configClientID, defaultClientID)
m.Set(configClientSecret, "")
srv := rest.NewClient(fshttp.NewClient(ctx))
token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken)
if err != nil {
return nil, errors.Wrap(err, "failed to get oauth token")
}
m.Set(configTokenURL, tokenEndpoint)
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
return nil, errors.Wrap(err, "error while saving token")
}
return fs.ConfigGoto("choose_device")
case "legacy": // configure a jottacloud backend using legacy authentication
m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?
Rclone has its own Jottacloud API KEY which works fine as long as one
only uses rclone on a single machine. When you want to use rclone with
this account on more than one machine it's recommended to create a
machine specific API key. These keys can NOT be shared between
machines.`)
case "legacy_api":
srv := rest.NewClient(fshttp.NewClient(ctx))
if config.Result == "true" {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
return nil, errors.Wrap(err, "failed to register device")
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
case "legacy_username":
m.Set(configUsername, config.Result)
return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
case "legacy_password":
m.Set("password", config.Result)
m.Set("auth_code", "")
return fs.ConfigGoto("legacy_do_auth")
case "legacy_auth_code":
authCode := strings.Replace(config.Result, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
m.Set("auth_code", authCode)
return fs.ConfigGoto("legacy_do_auth")
case "legacy_do_auth":
username, _ := m.Get(configUsername)
password, _ := m.Get("password")
password = obscure.MustReveal(password)
authCode, _ := m.Get("auth_code")
srv := rest.NewClient(fshttp.NewClient(ctx))
clientID, ok := m.Get(configClientID)
if !ok {
clientID = legacyClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = legacyEncryptedClientSecret
}
oauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: legacyTokenURL,
},
ClientID: clientID,
ClientSecret: obscure.MustReveal(clientSecret),
}
token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
if err == errAuthCodeRequired {
return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
}
m.Set("password", "")
m.Set("auth_code", "")
if err != nil {
return nil, errors.Wrap(err, "failed to get oauth token")
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
return nil, errors.Wrap(err, "error while saving token")
}
return fs.ConfigGoto("choose_device")
case "telia": // telia cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
},
ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
case "choose_device_query":
if config.Result != "true" {
m.Set(configDevice, "")
m.Set(configMountpoint, "")
return fs.ConfigGoto("end")
}
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
}
m.Set(configUsername, cust.Username)
acc, err := getDriveInfo(ctx, srv, cust.Username)
if err != nil {
return nil, err
}
return fs.ConfigChoose("choose_device_result", "config_device", `Please select the device to use. Normally this will be Jotta`, len(acc.Devices), func(i int) (string, string) {
return acc.Devices[i].Name, ""
})
case "choose_device_result":
device := config.Result
m.Set(configDevice, device)
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
username, _ := m.Get(configUsername)
dev, err := getDeviceInfo(ctx, srv, path.Join(username, device))
if err != nil {
return nil, err
}
return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", `Please select the mountpoint to use. Normally this will be Archive.`, len(dev.MountPoints), func(i int) (string, string) {
return dev.MountPoints[i].Name, ""
})
case "choose_device_mountpoint":
mountpoint := config.Result
m.Set(configMountpoint, mountpoint)
return fs.ConfigGoto("end")
case "end":
// All the config flows end up here in case we need to carry on with something
return nil, nil
}
return nil, fmt.Errorf("unknown state %q", config.State)
}
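// Summary of the state flow implemented above (derived from the cases,
// for orientation only):
//
//	""            -> auth_type_done -> standard | legacy | telia
//	standard      -> standard_token -> choose_device
//	legacy        -> legacy_api -> legacy_username -> legacy_password
//	              -> legacy_do_auth (-> legacy_auth_code on 2FA) -> choose_device
//	telia         -> choose_device
//	choose_device -> choose_device_query -> end, or
//	              -> choose_device_result -> choose_device_mountpoint -> end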
// Options defines the configuration for this backend
type Options struct {
Device string `config:"device"`
@@ -302,7 +165,6 @@ type Options struct {
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
TrashedOnly bool `config:"trashed_only"`
HardDelete bool `config:"hard_delete"`
NoVersions bool `config:"no_versions"`
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -356,21 +218,10 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// joinPath joins two path/url elements
//
// Does not perform clean on the result like path.Join does,
// which breaks urls by changing prefix "https://" into "https:/".
func joinPath(base string, rel string) string {
if rel == "" {
return base
}
if strings.HasSuffix(base, "/") {
return base + strings.TrimPrefix(rel, "/")
}
if strings.HasPrefix(rel, "/") {
return strings.TrimSuffix(base, "/") + rel
}
return base + "/" + rel
// parsePath parses a box 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
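// Why not path.Join? A minimal demonstration (illustrative only):
//
//	path.Join("https://www.jottacloud.com", "s/abc")
//	// "https:/www.jottacloud.com/s/abc" - the "//" is collapsed
//
// path.Join cleans the result, so joinPath concatenates manually instead.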
// retryErrorCodes is a slice of error codes that we will retry
@@ -392,6 +243,111 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) error {
teliaCloudOauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
},
ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
}
err := oauthutil.Config(ctx, "jottacloud", name, m, teliaCloudOauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, teliaCloudOauthConfig)
if err != nil {
return errors.Wrap(err, "failed to load oAuthClient")
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
return errors.Wrap(err, "failed to setup mountpoint")
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
return nil
}
// v1config configure a jottacloud backend using legacy authentication
func v1config(ctx context.Context, name string, m configmap.Mapper) error {
srv := rest.NewClient(fshttp.NewClient(ctx))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm(false) {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
return errors.Wrap(err, "failed to register device")
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
clientID, ok := m.Get(configClientID)
if !ok {
clientID = v1ClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = v1EncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.Endpoint.AuthURL = v1tokenURL
oauthConfig.Endpoint.TokenURL = v1tokenURL
fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
token, err := doAuthV1(ctx, srv, username, password)
if err != nil {
return errors.Wrap(err, "failed to get oauth token")
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
return errors.Wrap(err, "error while saving token")
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return errors.Wrap(err, "failed to load oAuthClient")
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
return errors.Wrap(err, "failed to setup mountpoint")
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(v1configVersion))
return nil
}
// registerDevice register a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
@@ -410,7 +366,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
opts := rest.Opts{
Method: "POST",
RootURL: legacyRegisterURL,
RootURL: v1registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
@@ -421,13 +377,8 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
return deviceRegistration, err
}
var errAuthCodeRequired = errors.New("auth code required")
// doLegacyAuth runs the actual token request for V1 authentication
//
// Call this first with blank authCode. If errAuthCodeRequired is
// returned then call it again with an authCode
func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Config, username, password, authCode string) (token oauth2.Token, err error) {
// doAuthV1 runs the actual token request for V1 authentication
func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare our token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
@@ -441,19 +392,22 @@ func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Con
ContentType: "application/x-www-form-urlencoded",
Parameters: values,
}
if authCode != "" {
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
}
// do the first request
var jsonToken api.TokenJSON
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil && authCode == "" {
if err != nil {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
return token, errAuthCodeRequired
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
}
@@ -465,11 +419,52 @@ func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Con
return token, err
}
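// Illustrative call pattern for the two-step flow above (hypothetical,
// not taken from this diff):
//
//	token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, "")
//	if err == errAuthCodeRequired {
//		// prompt the user for the SMS code, then retry with it
//		token, err = doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
//	}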
// doTokenAuth runs the actual token request for V2 authentication
func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string) (token oauth2.Token, tokenEndpoint string, err error) {
// v2config configure a jottacloud backend using the modern JottaCli token based authentication
func v2config(ctx context.Context, name string, m configmap.Mapper) error {
srv := rest.NewClient(fshttp.NewClient(ctx))
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
m.Set(configClientID, "jottacli")
m.Set(configClientSecret, "")
token, err := doAuthV2(ctx, srv, loginToken, m)
if err != nil {
return errors.Wrap(err, "failed to get oauth token")
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
return errors.Wrap(err, "error while saving token")
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return errors.Wrap(err, "failed to load oAuthClient")
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
return errors.Wrap(err, "failed to setup mountpoint")
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
return nil
}
// doAuthV2 runs the actual token request for V2 authentication
func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, "", err
return token, err
}
// decode login token
@@ -477,7 +472,7 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, "", err
return token, err
}
// retrieve endpoint urls
@@ -486,14 +481,19 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
RootURL: loginToken.WellKnownLink,
}
var wellKnown api.WellKnown
_, err = apiSrv.CallJSON(ctx, &opts, nil, &wellKnown)
_, err = srv.CallJSON(ctx, &opts, nil, &wellKnown)
if err != nil {
return token, "", err
return token, err
}
// save the tokenurl
oauthConfig.Endpoint.AuthURL = wellKnown.TokenEndpoint
oauthConfig.Endpoint.TokenURL = wellKnown.TokenEndpoint
m.Set(configTokenURL, wellKnown.TokenEndpoint)
// prepare our token request with username and password
values := url.Values{}
values.Set("client_id", defaultClientID)
values.Set("client_id", "jottacli")
values.Set("grant_type", "password")
values.Set("password", loginToken.AuthToken)
values.Set("scope", "offline_access+openid")
@@ -501,33 +501,68 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
values.Encode()
opts = rest.Opts{
Method: "POST",
RootURL: wellKnown.TokenEndpoint,
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Body: strings.NewReader(values.Encode()),
}
// do the first request
var jsonToken api.TokenJSON
_, err = apiSrv.CallJSON(ctx, &opts, nil, &jsonToken)
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
return token, "", err
return token, err
}
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
return token, wellKnown.TokenEndpoint, err
return token, err
}
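// For illustration only: the personal login token is base64url-encoded
// JSON, assumed to look something like (field names are assumptions):
//
//	{"username":"user","auth_token":"...","well_known_link":"https://id.jottacloud.com/.../.well-known/openid-configuration"}
//
// The code above decodes it, fetches the well-known document to learn the
// token endpoint, and then exchanges auth_token for an OAuth2 token.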
// setupMountpoint sets up a custom device and mountpoint if desired by the user
func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return "", "", err
}
acc, err := getDriveInfo(ctx, srv, cust.Username)
if err != nil {
return "", "", err
}
var deviceNames []string
for i := range acc.Devices {
deviceNames = append(deviceNames, acc.Devices[i].Name)
}
fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
device = config.Choose("Devices", deviceNames, nil, false)
dev, err := getDeviceInfo(ctx, srv, path.Join(cust.Username, device))
if err != nil {
return "", "", err
}
if len(dev.MountPoints) == 0 {
return "", "", errors.New("no mountpoints for selected device")
}
var mountpointNames []string
for i := range dev.MountPoints {
mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
}
fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
mountpoint = config.Choose("Mountpoints", mountpointNames, nil, false)
return device, mountpoint, err
}
// getCustomerInfo queries general information about the account
func getCustomerInfo(ctx context.Context, apiSrv *rest.Client) (info *api.CustomerInfo, err error) {
func getCustomerInfo(ctx context.Context, srv *rest.Client) (info *api.CustomerInfo, err error) {
opts := rest.Opts{
Method: "GET",
Path: "account/v1/customer",
}
_, err = apiSrv.CallJSON(ctx, &opts, nil, &info)
_, err = srv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get customer info")
}
@@ -644,7 +679,7 @@ func (f *Fs) filePath(file string) string {
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
if legacyTokenURL == req.URL.String() {
if v1tokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
@@ -660,50 +695,53 @@ func grantTypeFilter(req *http.Request) {
}
}
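// Illustrative effect of the filter above (assumed; the body is truncated
// in this diff): the v1 token endpoint expects an upper-case grant type,
// so a refresh body of
//
//	grant_type=refresh_token&refresh_token=...
//
// is rewritten to
//
//	grant_type=REFRESH_TOKEN&refresh_token=...
//
// before the request is sent on.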
func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuthClient *http.Client, ts *oauthutil.TokenSource, err error) {
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Check config version
var ver int
version, ok := m.Get("configVersion")
if ok {
ver, err = strconv.Atoi(version)
if err != nil {
return nil, nil, errors.New("Failed to parse config version")
return nil, errors.New("Failed to parse config version")
}
ok = (ver == configVersion) || (ver == legacyConfigVersion)
ok = (ver == configVersion) || (ver == v1configVersion)
}
if !ok {
return nil, nil, errors.New("Outdated config - please reconfigure this backend")
return nil, errors.New("Outdated config - please reconfigure this backend")
}
baseClient := fshttp.NewClient(ctx)
oauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
},
}
if ver == configVersion {
oauthConfig.ClientID = defaultClientID
oauthConfig.ClientID = "jottacli"
// if custom endpoints are set use them else stick with defaults
if tokenURL, ok := m.Get(configTokenURL); ok {
oauthConfig.Endpoint.TokenURL = tokenURL
// jottacloud is weird. we need to use the tokenURL as authURL
oauthConfig.Endpoint.AuthURL = tokenURL
}
} else if ver == legacyConfigVersion {
} else if ver == v1configVersion {
clientID, ok := m.Get(configClientID)
if !ok {
clientID = legacyClientID
clientID = v1ClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = legacyEncryptedClientSecret
clientSecret = v1EncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.Endpoint.TokenURL = legacyTokenURL
oauthConfig.Endpoint.AuthURL = legacyTokenURL
oauthConfig.Endpoint.TokenURL = v1tokenURL
oauthConfig.Endpoint.AuthURL = v1tokenURL
// add the request filter to fix token refresh
if do, ok := baseClient.Transport.(interface {
@@ -716,29 +754,13 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
}
// Create OAuth Client
oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
}
return oAuthClient, ts, nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
oAuthClient, ts, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
}
rootIsDir := strings.HasSuffix(root, "/")
root = strings.Trim(root, "/")
root = parsePath(root)
f := &Fs{
name: name,
@@ -1276,7 +1298,8 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if result.PublicSharePath == "" {
return "", errors.New("couldn't create public link - no link path received")
}
return joinPath(baseURL, result.PublicSharePath), nil
link = path.Join(baseURL, result.PublicSharePath)
return link, nil
}
// About gets quota information
@@ -1500,20 +1523,6 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.opt.NoVersions {
err := o.readMetaData(ctx, false)
if err == nil {
// if the object exists delete it
err = o.remove(ctx, true)
if err != nil {
return errors.Wrap(err, "failed to remove old object")
}
}
// if the object does not exist we can just continue but if the error is something different we should report that
if err != fs.ErrorObjectNotFound {
return err
}
}
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()
@@ -1604,7 +1613,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return nil
}
func (o *Object) remove(ctx context.Context, hard bool) error {
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
opts := rest.Opts{
Method: "POST",
Path: o.filePath(),
@@ -1612,7 +1622,7 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
NoResponse: true,
}
if hard {
if o.fs.opt.HardDelete {
opts.Parameters.Set("rm", "true")
} else {
opts.Parameters.Set("dl", "true")
@@ -1624,11 +1634,6 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
})
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.remove(ctx, o.fs.opt.HardDelete)
}
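// Illustrative (an assumption, not stated in this diff): "rm=true" makes
// the delete permanent while "dl=true" moves the file to the Jottacloud
// trash, which is why hard_delete selects between the two parameters.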
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)


@@ -27,7 +27,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/text/unicode/norm"
)
// Constants
@@ -74,34 +73,25 @@ points, as you explicitly acknowledge that they should be skipped.`,
Advanced: true,
}, {
Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead) (Deprecated)
Help: `Assume the Stat size of links is zero (and read them instead)
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places
On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
However, on unix it reads as the length of the text in the link. This may cause errors like this when
syncing:
- Windows
- On some virtual filesystems (such as LucidLink)
- Android
Failed to copy: corrupted on transfer: sizes differ 0 vs 13
So rclone now always reads the link
`,
Setting this flag causes rclone to read the link and use that as the size of the link
instead of 0 which in most cases fixes the problem.`,
Default: false,
Advanced: true,
}, {
Name: "unicode_normalization",
Help: `Apply unicode NFC normalization to paths and filenames
Name: "no_unicode_normalization",
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
This flag can be used to normalize file names read from the local
filesystem into unicode NFC form.
Rclone does not normally touch the encoding of file names it reads from
the file system.
This can be useful when using macOS as it normally provides decomposed (NFD)
unicode which in some languages (e.g. Korean) doesn't display properly on
some OSes.
Note that rclone compares filenames with unicode normalization in the sync
routine so this flag shouldn't normally be used.`,
This flag is deprecated now. Rclone no longer normalizes unicode file
names, but it compares them with unicode normalization in the sync
routine instead.`,
Default: false,
Advanced: true,
}, {
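// A minimal sketch of the NFD/NFC difference the help text above refers
// to (illustrative only, using golang.org/x/text/unicode/norm):
//
//	import "golang.org/x/text/unicode/norm"
//
//	name := "\u1112\u1161\u11ab"                   // decomposed (NFD) Korean jamo
//	fmt.Println(norm.NFC.String(name) == "\ud55c") // true: one precomposed syllable
//
// macOS typically returns the decomposed form from the filesystem, which
// is why file names can need normalizing before comparison.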
@@ -206,7 +196,8 @@ type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
UTFNorm bool `config:"unicode_normalization"`
ZeroSizeLinks bool `config:"zero_size_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
@@ -265,6 +256,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errLinksAndCopyLinks
}
if opt.NoUTFNorm {
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
}
f := &Fs{
name: name,
opt: *opt,
@@ -467,10 +462,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for _, name := range names {
namepath := filepath.Join(fsDirPath, name)
fi, fierr := os.Lstat(namepath)
if os.IsNotExist(fierr) {
// skip entry removed by a concurrent goroutine
continue
}
if fierr != nil {
err = errors.Wrapf(err, "failed to read directory %q", namepath)
fs.Errorf(dir, "%v", fierr)
@@ -531,9 +522,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
if f.opt.UTFNorm {
filename = norm.NFC.String(filename)
}
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
if !utf8.ValidString(filename) {
@@ -1029,6 +1017,7 @@ func (file *localOpenFile) Close() (err error) {
func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err error) {
// Read the link and return the destination it as the contents of the object
linkdst, err := os.Readlink(o.path)
fs.Infof(o, "openTranslatedLink link = %q, offset = %d, limit = %d, err = %v", linkdst, offset, limit, err)
if err != nil {
return nil, err
}
@@ -1279,14 +1268,11 @@ func (o *Object) setMetadata(info os.FileInfo) {
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// Read the size of the link.
//
// The value in info.Size() is not always correct
// - Windows links read as 0 size
// - Some virtual filesystems (such as LucidLink) links read as 0 size
// - Android - some versions the links are larger than readlink suggests
if o.translatedLink {
// On Windows links read as 0 size so set the correct size here
// Optionally, users can turn this feature on with the zero_size_links flag
if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
linkdst, err := os.Readlink(o.path)
fs.Infof(o, "setMetadata link = %q, err = %v", linkdst, err)
if err != nil {
fs.Errorf(o, "Failed to read link size: %v", err)
} else {


@@ -80,7 +80,7 @@ var oauthConfig = &oauth2.Config{
// Register with Fs
func init() {
MrHashType = hash.RegisterHash("mailru", "MailruHash", 40, mrhash.New)
MrHashType = hash.RegisterHash("MailruHash", 40, mrhash.New)
fs.Register(&fs.RegInfo{
Name: "mailru",
Description: "Mail.ru Cloud",


@@ -93,12 +93,213 @@ var (
// Register with Fs
func init() {
QuickXorHashType = hash.RegisterHash("quickxor", "QuickXorHash", 40, quickxorhash.New)
QuickXorHashType = hash.RegisterHash("QuickXorHash", 40, quickxorhash.New)
fs.Register(&fs.RegInfo{
Name: "onedrive",
Description: "Microsoft OneDrive",
NewFs: NewFs,
Config: Config,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
region, _ := m.Get("region")
graphURL := graphAPIEndpoint[region] + "/v1.0"
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
}
ci := fs.GetConfig(ctx)
err := oauthutil.Config(ctx, "onedrive", name, m, oauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
// Stop if we are running non-interactive config
if ci.AutoConfirm {
return nil
}
type driveResource struct {
DriveID string `json:"id"`
DriveName string `json:"name"`
DriveType string `json:"driveType"`
}
type drivesResponse struct {
Drives []driveResource `json:"value"`
}
type siteResource struct {
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
type siteResponse struct {
Sites []siteResource `json:"value"`
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return errors.Wrap(err, "failed to configure OneDrive")
}
srv := rest.NewClient(oAuthClient)
var opts rest.Opts
var finalDriveID string
var siteID string
var relativePath string
switch config.Choose("Your choice",
[]string{"onedrive", "sharepoint", "url", "search", "driveid", "siteid", "path"},
[]string{
"OneDrive Personal or Business",
"Root Sharepoint site",
"Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
"Search for a Sharepoint site",
"Type in driveID (advanced)",
"Type in SiteID (advanced)",
"Sharepoint server-relative path (advanced, e.g. /teams/hr)",
},
false) {
case "onedrive":
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/me/drives",
}
case "sharepoint":
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root/drives",
}
case "driveid":
fmt.Printf("Paste your Drive ID here> ")
finalDriveID = config.ReadLine()
case "siteid":
fmt.Printf("Paste your Site ID here> ")
siteID = config.ReadLine()
case "url":
fmt.Println("Example: \"https://contoso.sharepoint.com/sites/mysite\" or \"mysite\"")
fmt.Printf("Paste your Site URL here> ")
siteURL := config.ReadLine()
re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
relativePath = "/sites/" + match[1]
} else {
relativePath = "/sites/" + siteURL
}
case "path":
fmt.Printf("Enter server-relative URL here> ")
relativePath = config.ReadLine()
case "search":
fmt.Printf("What to search for> ")
searchTerm := config.ReadLine()
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites?search=" + searchTerm,
}
sites := siteResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &sites)
if err != nil {
return errors.Wrap(err, "failed to query available sites")
}
if len(sites.Sites) == 0 {
return errors.Errorf("search for %q returned no results", searchTerm)
}
fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites))
for index, site := range sites.Sites {
fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
}
siteID = sites.Sites[config.ChooseNumber("Choose site to use:", 0, len(sites.Sites)-1)].SiteID
}
// if we use server-relative URL for finding the drive
if relativePath != "" {
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root:" + relativePath,
}
site := siteResource{}
_, err := srv.CallJSON(ctx, &opts, nil, &site)
if err != nil {
return errors.Wrap(err, "failed to query available site by relative path")
}
siteID = site.SiteID
}
// if we have a siteID we need to ask for the drives
if siteID != "" {
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/" + siteID + "/drives",
}
}
// We don't have the final ID yet?
// query Microsoft Graph
if finalDriveID == "" {
drives := drivesResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &drives)
if err != nil {
return errors.Wrap(err, "failed to query available drives")
}
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opts.Path == "/me/drives" {
opts.Path = "/me/drive"
meDrive := driveResource{}
_, err := srv.CallJSON(ctx, &opts, nil, &meDrive)
if err != nil {
return errors.Wrap(err, "failed to query available drives")
}
found := false
for _, drive := range drives.Drives {
if drive.DriveID == meDrive.DriveID {
found = true
break
}
}
// add the me drive if not found already
if !found {
fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
drives.Drives = append(drives.Drives, meDrive)
}
}
if len(drives.Drives) == 0 {
return errors.New("no drives found")
}
fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives))
for index, drive := range drives.Drives {
fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID)
}
finalDriveID = drives.Drives[config.ChooseNumber("Choose drive to use:", 0, len(drives.Drives)-1)].DriveID
}
// Test the driveID and get drive type
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/drives/" + finalDriveID + "/root"}
var rootItem api.Item
_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
if err != nil {
return errors.Wrapf(err, "failed to query root for drive %s", finalDriveID)
}
fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
// This does not work, YET :)
if !config.ConfirmWithConfig(ctx, m, "config_drive_ok", true) {
return errors.New("cancelled by user")
}
m.Set(configDriveID, finalDriveID)
m.Set(configDriveType, rootItem.ParentReference.DriveType)
return nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "region",
Help: "Choose national cloud region for OneDrive.",
@@ -261,266 +462,6 @@ At the time of writing this only works with OneDrive personal paid accounts.
})
}
type driveResource struct {
DriveID string `json:"id"`
DriveName string `json:"name"`
DriveType string `json:"driveType"`
}
type drivesResponse struct {
Drives []driveResource `json:"value"`
}
type siteResource struct {
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
type siteResponse struct {
Sites []siteResource `json:"value"`
}
// Get the region and graphURL from the config
func getRegionURL(m configmap.Mapper) (region, graphURL string) {
region, _ = m.Get("region")
graphURL = graphAPIEndpoint[region] + "/v1.0"
return region, graphURL
}
// Config for chooseDrive
type chooseDriveOpt struct {
opts rest.Opts
finalDriveID string
siteID string
relativePath string
}
// chooseDrive returns a query to choose which drive the user is interested in
func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest.Client, opt chooseDriveOpt) (*fs.ConfigOut, error) {
_, graphURL := getRegionURL(m)
// if we use server-relative URL for finding the drive
if opt.relativePath != "" {
opt.opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root:" + opt.relativePath,
}
site := siteResource{}
_, err := srv.CallJSON(ctx, &opt.opts, nil, &site)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available site by relative path: %v", err))
}
opt.siteID = site.SiteID
}
// if we have a siteID we need to ask for the drives
if opt.siteID != "" {
opt.opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/" + opt.siteID + "/drives",
}
}
drives := drivesResponse{}
// We don't have the final ID yet?
// query Microsoft Graph
if opt.finalDriveID == "" {
_, err := srv.CallJSON(ctx, &opt.opts, nil, &drives)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
}
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opt.opts.Path == "/me/drives" {
opt.opts.Path = "/me/drive"
meDrive := driveResource{}
_, err := srv.CallJSON(ctx, &opt.opts, nil, &meDrive)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
}
found := false
for _, drive := range drives.Drives {
if drive.DriveID == meDrive.DriveID {
found = true
break
}
}
// add the me drive if not found already
if !found {
fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
drives.Drives = append(drives.Drives, meDrive)
}
}
} else {
drives.Drives = append(drives.Drives, driveResource{
DriveID: opt.finalDriveID,
DriveName: "Chosen Drive ID",
DriveType: "drive",
})
}
if len(drives.Drives) == 0 {
return fs.ConfigError("choose_type", "No drives found")
}
return fs.ConfigChoose("driveid_final", "config_driveid", "Select drive you want to use", len(drives.Drives), func(i int) (string, string) {
drive := drives.Drives[i]
return drive.DriveID, fmt.Sprintf("%s (%s)", drive.DriveName, drive.DriveType)
})
}
// Config the backend
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
region, graphURL := getRegionURL(m)
if config.State == "" {
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
}
return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure OneDrive")
}
srv := rest.NewClient(oAuthClient)
switch config.State {
case "choose_type":
return fs.ConfigChooseFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
Value: "onedrive",
Help: "OneDrive Personal or Business",
}, {
Value: "sharepoint",
Help: "Root Sharepoint site",
}, {
Value: "url",
Help: "Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
}, {
Value: "search",
Help: "Search for a Sharepoint site",
}, {
Value: "driveid",
Help: "Type in driveID (advanced)",
}, {
Value: "siteid",
Help: "Type in SiteID (advanced)",
}, {
Value: "path",
Help: "Sharepoint server-relative path (advanced, e.g. /teams/hr)",
}})
case "choose_type_done":
// Jump to next state according to config chosen
return fs.ConfigGoto(config.Result)
case "onedrive":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
opts: rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/me/drives",
},
})
case "sharepoint":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
opts: rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root/drives",
},
})
case "driveid":
return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
case "driveid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
finalDriveID: config.Result,
})
case "siteid":
return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
case "siteid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: config.Result,
})
case "url":
return fs.ConfigInput("url_end", "config_site_url", `Site URL
Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
`)
case "url_end":
siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: "/sites/" + match[1],
})
}
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: "/sites/" + siteURL,
})
case "path":
return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
case "path_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: config.Result,
})
case "search":
return fs.ConfigInput("search_end", "config_search_term", `Search term`)
case "search_end":
searchTerm := config.Result
opts := rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites?search=" + searchTerm,
}
sites := siteResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &sites)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available sites: %v", err))
}
if len(sites.Sites) == 0 {
return fs.ConfigError("choose_type", fmt.Sprintf("search for %q returned no results", searchTerm))
}
return fs.ConfigChoose("search_sites", "config_site", `Select the Site you want to use`, len(sites.Sites), func(i int) (string, string) {
site := sites.Sites[i]
return site.SiteID, fmt.Sprintf("%s (%s)", site.SiteName, site.SiteURL)
})
case "search_sites":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: config.Result,
})
case "driveid_final":
finalDriveID := config.Result
// Test the driveID and get drive type
opts := rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/drives/" + finalDriveID + "/root"}
var rootItem api.Item
_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query root for drive %q: %v", finalDriveID, err))
}
m.Set(configDriveID, finalDriveID)
m.Set(configDriveType, rootItem.ParentReference.DriveType)
return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
case "driveid_final_end":
if config.Result == "true" {
return nil, nil
}
return fs.ConfigGoto("choose_type")
}
return nil, fmt.Errorf("unknown state %q", config.State)
}
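// Summary of the state flow implemented above (derived from the cases,
// for orientation only):
//
//	"" -> oauth -> choose_type -> choose_type_done -> one of:
//	  onedrive | sharepoint           -> chooseDrive
//	  url     -> url_end              -> chooseDrive (server-relative path)
//	  search  -> search_end -> search_sites -> chooseDrive
//	  driveid -> driveid_end          -> chooseDrive
//	  siteid  -> siteid_end           -> chooseDrive
//	  path    -> path_end             -> chooseDrive
//	chooseDrive -> driveid_final -> driveid_final_end -> done (or back to choose_type)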
// Options defines the configuration for this backend
type Options struct {
Region string `config:"region"`
@@ -1500,85 +1441,10 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return shouldRetry(ctx, resp, err)
})
if err != nil {
if resp != nil && resp.StatusCode == 400 && f.driveType != driveTypePersonal {
return "", errors.Errorf("%v (is making public links permitted by the org admin?)", err)
}
fmt.Println(err)
return "", err
}
shareURL := result.Link.WebURL
// Convert share link to direct download link if target is not a folder
// Not attempting to do the conversion for regional versions, just to be safe
if f.opt.Region != regionGlobal {
return shareURL, nil
}
if info.Folder != nil {
fs.Debugf(nil, "Can't convert share link for folder to direct link - returning the link as is")
return shareURL, nil
}
cnvFailMsg := "Don't know how to convert share link to direct link - returning the link as is"
directURL := ""
segments := strings.Split(shareURL, "/")
switch f.driveType {
case driveTypePersonal:
// Method: https://stackoverflow.com/questions/37951114/direct-download-link-to-onedrive-file
if len(segments) != 5 {
fs.Logf(f, cnvFailMsg)
return shareURL, nil
}
enc := base64.StdEncoding.EncodeToString([]byte(shareURL))
enc = strings.ReplaceAll(enc, "/", "_")
enc = strings.ReplaceAll(enc, "+", "-")
enc = strings.ReplaceAll(enc, "=", "")
directURL = fmt.Sprintf("https://api.onedrive.com/v1.0/shares/u!%s/root/content", enc)
case driveTypeBusiness:
// Method: https://docs.microsoft.com/en-us/sharepoint/dev/spfx/shorter-share-link-format
// Example:
// https://{tenant}-my.sharepoint.com/:t:/g/personal/{user_email}/{Opaque_String}
// --convert to->
// https://{tenant}-my.sharepoint.com/personal/{user_email}/_layouts/15/download.aspx?share={Opaque_String}
if len(segments) != 8 {
fs.Logf(f, cnvFailMsg)
return shareURL, nil
}
directURL = fmt.Sprintf("https://%s/%s/%s/_layouts/15/download.aspx?share=%s",
segments[2], segments[5], segments[6], segments[7])
case driveTypeSharepoint:
// Method: Similar to driveTypeBusiness
// Example:
// https://{tenant}.sharepoint.com/:t:/s/{site_name}/{Opaque_String}
// --convert to->
// https://{tenant}.sharepoint.com/sites/{site_name}/_layouts/15/download.aspx?share={Opaque_String}
//
// https://{tenant}.sharepoint.com/:t:/t/{team_name}/{Opaque_String}
// --convert to->
// https://{tenant}.sharepoint.com/teams/{team_name}/_layouts/15/download.aspx?share={Opaque_String}
//
// https://{tenant}.sharepoint.com/:t:/g/{Opaque_String}
// --convert to->
// https://{tenant}.sharepoint.com/_layouts/15/download.aspx?share={Opaque_String}
if len(segments) < 6 || len(segments) > 7 {
fs.Logf(f, cnvFailMsg)
return shareURL, nil
}
pathPrefix := ""
switch segments[4] {
case "s": // Site
pathPrefix = "/sites/" + segments[5]
case "t": // Team
pathPrefix = "/teams/" + segments[5]
case "g": // Root site
default:
fs.Logf(f, cnvFailMsg)
return shareURL, nil
}
directURL = fmt.Sprintf("https://%s%s/_layouts/15/download.aspx?share=%s",
segments[2], pathPrefix, segments[len(segments)-1])
}
return directURL, nil
return result.Link.WebURL, nil
}
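// A worked example of the personal-drive conversion above (illustrative
// values only):
//
//	shareURL:  https://1drv.ms/t/s!AAA_BBB            (5 segments)
//	enc:       base64(shareURL) with '/' -> '_', '+' -> '-', '=' stripped
//	directURL: https://api.onedrive.com/v1.0/shares/u!<enc>/root/content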
// CleanUp deletes all the hidden files.


@@ -71,7 +71,7 @@ func init() {
Name: "pcloud",
Description: "Pcloud",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
optc := new(Options)
err := configstruct.Set(m, optc)
if err != nil {
@@ -93,11 +93,15 @@ func init() {
fs.Debugf(nil, "pcloud: got hostname %q", hostname)
return nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
opt := oauthutil.Options{
CheckAuth: checkAuth,
StateBlankOK: true, // pCloud seems to drop the state parameter now - see #4210
})
}
err = oauthutil.Config(ctx, "pcloud", name, m, oauthConfig, &opt)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: config.ConfigEncoding,

View File

@@ -77,10 +77,12 @@ func init() {
Name: "premiumizeme",
Description: "premiumize.me",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
err := oauthutil.Config(ctx, "premiumizeme", name, m, oauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
},
Options: []fs.Option{{
Name: "api_key",


@@ -5,6 +5,7 @@ import (
"regexp"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -59,11 +60,15 @@ func init() {
Name: "putio",
Description: "Put.io",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: putioConfig,
NoOffline: true,
})
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
opt := oauthutil.Options{
NoOffline: true,
}
err := oauthutil.Config(ctx, "putio", name, m, putioConfig, &opt)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
},
Options: []fs.Option{{
Name: config.ConfigEncoding,


@@ -26,7 +26,6 @@ import (
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/endpoints"
@@ -59,7 +58,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, SeaweedFS, and Tencent COS",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, and Tencent COS",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
@@ -92,9 +91,6 @@ func init() {
}, {
Value: "Scaleway",
Help: "Scaleway Object Storage",
}, {
Value: "SeaweedFS",
Help: "SeaweedFS S3",
}, {
Value: "StackPath",
Help: "StackPath Object Storage",
@@ -431,12 +427,6 @@ func init() {
Help: "Endpoint for OSS API.",
Provider: "Alibaba",
Examples: []fs.OptionExample{{
Value: "oss-accelerate.aliyuncs.com",
Help: "Global Accelerate",
}, {
Value: "oss-accelerate-overseas.aliyuncs.com",
Help: "Global Accelerate (outside mainland China)",
}, {
Value: "oss-cn-hangzhou.aliyuncs.com",
Help: "East China 1 (Hangzhou)",
}, {
@@ -453,22 +443,10 @@ func init() {
Help: "North China 3 (Zhangjiakou)",
}, {
Value: "oss-cn-huhehaote.aliyuncs.com",
Help: "North China 5 (Hohhot)",
}, {
Value: "oss-cn-wulanchabu.aliyuncs.com",
Help: "North China 6 (Ulanqab)",
Help: "North China 5 (Huhehaote)",
}, {
Value: "oss-cn-shenzhen.aliyuncs.com",
Help: "South China 1 (Shenzhen)",
}, {
Value: "oss-cn-heyuan.aliyuncs.com",
Help: "South China 2 (Heyuan)",
}, {
Value: "oss-cn-guangzhou.aliyuncs.com",
Help: "South China 3 (Guangzhou)",
}, {
Value: "oss-cn-chengdu.aliyuncs.com",
Help: "West China 1 (Chengdu)",
}, {
Value: "oss-cn-hongkong.aliyuncs.com",
Help: "Hong Kong (Hong Kong)",
@@ -614,10 +592,6 @@ func init() {
Value: "sgp1.digitaloceanspaces.com",
Help: "Digital Ocean Spaces Singapore 1",
Provider: "DigitalOcean",
}, {
Value: "localhost:8333",
Help: "SeaweedFS S3 localhost",
Provider: "SeaweedFS",
}, {
Value: "s3.wasabisys.com",
Help: "Wasabi US East endpoint",
@@ -630,10 +604,6 @@ func init() {
Value: "s3.eu-central-1.wasabisys.com",
Help: "Wasabi EU Central endpoint",
Provider: "Wasabi",
}, {
Value: "s3.ap-northeast-1.wasabisys.com",
Help: "Wasabi AP Northeast endpoint",
Provider: "Wasabi",
}},
}, {
Name: "location_constraint",
@@ -1546,11 +1516,6 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
}),
ExpiryWindow: 3 * time.Minute,
},
// Pick up IAM role if we are in EKS
&stscreds.WebIdentityRoleProvider{
ExpiryWindow: 3 * time.Minute,
},
}
cred := credentials.NewChainCredentials(providers)
@@ -1728,7 +1693,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
GetTier: true,
SlowModTime: true,
}).Fill(ctx, f)
if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject {
// Check to see if the (bucket,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)

View File

@@ -296,86 +296,83 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// Config callback for 2FA
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
func Config(ctx context.Context, name string, m configmap.Mapper) error {
ci := fs.GetConfig(ctx)
serverURL, ok := m.Get(configURL)
if !ok || serverURL == "" {
// If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile"
return nil, errors.New("operation not supported on this remote. If you need a 2FA code on your account, use the command: rclone config reconnect <remote name>: ")
return errors.New("operation not supported on this remote. If you need a 2FA code on your account, use the command: nrclone config reconnect <remote name>: ")
}
// Stop if we are running non-interactive config
if ci.AutoConfirm {
return nil
}
u, err := url.Parse(serverURL)
if err != nil {
return nil, errors.Errorf("invalid server URL %s", serverURL)
return errors.Errorf("invalid server URL %s", serverURL)
}
is2faEnabled, _ := m.Get(config2FA)
if is2faEnabled != "true" {
return nil, errors.New("two-factor authentication is not enabled on this account")
return errors.New("two-factor authentication is not enabled on this account")
}
username, _ := m.Get(configUser)
if username == "" {
return nil, errors.New("a username is required")
return errors.New("a username is required")
}
password, _ := m.Get(configPassword)
if password != "" {
password, _ = obscure.Reveal(password)
}
// Just make sure we do have a password
for password == "" {
fmt.Print("Two-factor authentication: please enter your password (it won't be saved in the configuration)\npassword> ")
password = config.ReadPassword()
}
switch config.State {
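// State flow (per the cases below): "" -> "password" -> "2fa" -> "2fa_do",
// with "2fa_error" offering a retry after a failed authentication attempt.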
case "":
// Just make sure we do have a password
if password == "" {
return fs.ConfigPassword("", "config_password", "Two-factor authentication: please enter your password (it won't be saved in the configuration)")
}
return fs.ConfigGoto("password")
case "password":
password = config.Result
if password == "" {
return fs.ConfigError("password", "Password can't be blank")
}
m.Set(configPassword, obscure.MustObscure(config.Result))
return fs.ConfigGoto("2fa")
case "2fa":
return fs.ConfigInput("2fa_do", "config_2fa", "Two-factor authentication: please enter your 2FA code")
case "2fa_do":
code := config.Result
if code == "" {
return fs.ConfigError("2fa", "2FA codes can't be blank")
// Create rest client for getAuthorizationToken
url := u.String()
if !strings.HasPrefix(url, "/") {
url += "/"
}
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url)
// We loop asking for a 2FA code
for {
code := ""
for code == "" {
fmt.Print("Two-factor authentication: please enter your 2FA code\n2fa code> ")
code = config.ReadLine()
}
// Create rest client for getAuthorizationToken
url := u.String()
if !strings.HasPrefix(url, "/") {
url += "/"
}
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url)
// We loop asking for a 2FA code
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
fmt.Println("Authenticating...")
token, err := getAuthorizationToken(ctx, srv, username, password, code)
if err != nil {
return fs.ConfigConfirm("2fa_error", true, "config_retry", fmt.Sprintf("Authentication failed: %v\n\nTry Again?", err))
fmt.Printf("Authentication failed: %v\n", err)
tryAgain := strings.ToLower(config.ReadNonEmptyLine("Do you want to try again (y/n)?"))
if tryAgain != "y" && tryAgain != "yes" {
// The user is giving up, we're done here
break
}
}
if token == "" {
return fs.ConfigConfirm("2fa_error", true, "config_retry", "Authentication failed - no token returned.\n\nTry Again?")
if token != "" {
fmt.Println("Success!")
// Let's save the token into the configuration
m.Set(configAuthToken, token)
// And delete any previous entry for password
m.Set(configPassword, "")
// And we're done here
break
}
// Let's save the token into the configuration
m.Set(configAuthToken, token)
// And delete any previous entry for password
m.Set(configPassword, "")
// And we're done here
return nil, nil
case "2fa_error":
if config.Result == "true" {
return fs.ConfigGoto("2fa")
}
return nil, errors.New("2fa authentication failed")
}
return nil, fmt.Errorf("unknown state %q", config.State)
return nil
}
// sets the AuthorizationToken up

View File

@@ -429,6 +429,10 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
sftp.UseConcurrentWrites(!f.opt.DisableConcurrentWrites),
)
if f.opt.DisableConcurrentReads { // FIXME
fs.Errorf(f, "Ignoring disable_concurrent_reads after library reversion - see #5197")
}
return sftp.NewClientPipe(pr, pw, opts...)
}
@@ -562,7 +566,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
if opt.KnownHostsFile != "" {
hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile))
hostcallback, err := knownhosts.New(opt.KnownHostsFile)
if err != nil {
return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
}

View File

@@ -135,7 +135,7 @@ func init() {
Name: "sharefile",
Description: "Citrix Sharefile",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
oauthConfig := newOauthConfig("")
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
if auth == nil || auth.Form == nil {
@@ -151,10 +151,14 @@ func init() {
oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
return nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
CheckAuth: checkAuth,
})
opt := oauthutil.Options{
CheckAuth: checkAuth,
}
err := oauthutil.Config(ctx, "sharefile", name, m, oauthConfig, &opt)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
},
Options: []fs.Option{{
Name: "upload_cutoff",

View File

@@ -75,63 +75,51 @@ func init() {
Name: "sugarsync",
Description: "Sugarsync",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, errors.Wrap(err, "failed to read options")
return errors.Wrap(err, "failed to read options")
}
switch config.State {
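// State flow (per the cases below): "" -> "refresh" (only if a token already
// exists) -> "username" -> "password" -> "auth", which requests the refresh token.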
case "":
if opt.RefreshToken == "" {
return fs.ConfigGoto("username")
if opt.RefreshToken != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.ConfirmWithConfig(ctx, m, "config_refresh_token", true) {
return nil
}
return fs.ConfigConfirm("refresh", true, "config_refresh", "Already have a token - refresh?")
case "refresh":
if config.Result == "false" {
return nil, nil
}
return fs.ConfigGoto("username")
case "username":
return fs.ConfigInput("password", "config_username", "username (email address)")
case "password":
m.Set("username", config.Result)
return fs.ConfigPassword("auth", "config_password", "Your Sugarsync password.\n\nOnly required during setup and will not be stored.")
case "auth":
username, _ := m.Get("username")
m.Set("username", "")
password := config.Result
authRequest := api.AppAuthorization{
Username: username,
Password: password,
Application: withDefault(opt.AppID, appID),
AccessKeyID: withDefault(opt.AccessKeyID, accessKeyID),
PrivateAccessKey: withDefault(opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
}
var resp *http.Response
opts := rest.Opts{
Method: "POST",
Path: "/app-authorization",
}
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME
// FIXME
//err = f.pacer.Call(func() (bool, error) {
resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
// return shouldRetry(ctx, resp, err)
//})
if err != nil {
return nil, errors.Wrap(err, "failed to get token")
}
opt.RefreshToken = resp.Header.Get("Location")
m.Set("refresh_token", opt.RefreshToken)
return nil, nil
}
return nil, fmt.Errorf("unknown state %q", config.State)
}, Options: []fs.Option{{
fmt.Printf("Username (email address)> ")
username := config.ReadLine()
password := config.GetPassword("Your Sugarsync password is only required during setup and will not be stored.")
authRequest := api.AppAuthorization{
Username: username,
Password: password,
Application: withDefault(opt.AppID, appID),
AccessKeyID: withDefault(opt.AccessKeyID, accessKeyID),
PrivateAccessKey: withDefault(opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
}
var resp *http.Response
opts := rest.Opts{
Method: "POST",
Path: "/app-authorization",
}
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME
// FIXME
//err = f.pacer.Call(func() (bool, error) {
resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
// return shouldRetry(ctx, resp, err)
//})
if err != nil {
return errors.Wrap(err, "failed to get token")
}
opt.RefreshToken = resp.Header.Get("Location")
m.Set("refresh_token", opt.RefreshToken)
return nil
},
Options: []fs.Option{{
Name: "app_id",
Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.",
}, {

View File

@@ -41,19 +41,19 @@ func init() {
Name: "tardigrade",
Description: "Tardigrade Decentralized Cloud Storage",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
provider, _ := m.Get(fs.ConfigProvider)
Config: func(ctx context.Context, name string, configMapper configmap.Mapper) error {
provider, _ := configMapper.Get(fs.ConfigProvider)
config.FileDeleteKey(name, fs.ConfigProvider)
if provider == newProvider {
satelliteString, _ := m.Get("satellite_address")
apiKey, _ := m.Get("api_key")
passphrase, _ := m.Get("passphrase")
satelliteString, _ := configMapper.Get("satellite_address")
apiKey, _ := configMapper.Get("api_key")
passphrase, _ := configMapper.Get("passphrase")
// satelliteString always contains a default and the passphrase can be empty
if apiKey == "" {
return nil, nil
return nil
}
satellite, found := satMap[satelliteString]
@@ -63,23 +63,23 @@ func init() {
access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
if err != nil {
return nil, errors.Wrap(err, "couldn't create access grant")
return errors.Wrap(err, "couldn't create access grant")
}
serializedAccess, err := access.Serialize()
if err != nil {
return nil, errors.Wrap(err, "couldn't serialize access grant")
return errors.Wrap(err, "couldn't serialize access grant")
}
m.Set("satellite_address", satellite)
m.Set("access_grant", serializedAccess)
configMapper.Set("satellite_address", satellite)
configMapper.Set("access_grant", serializedAccess)
} else if provider == existingProvider {
config.FileDeleteKey(name, "satellite_address")
config.FileDeleteKey(name, "api_key")
config.FileDeleteKey(name, "passphrase")
} else {
return nil, errors.Errorf("invalid provider type: %s", provider)
return errors.Errorf("invalid provider type: %s", provider)
}
return nil, nil
return nil
},
Options: []fs.Option{
{

View File

@@ -113,21 +113,6 @@ func init() {
Name: config.ConfigEncoding,
Help: configEncodingHelp,
Advanced: true,
}, {
Name: "headers",
Help: `Set HTTP headers for all transactions
Use this to set additional HTTP headers for all transactions.
The input format is a comma-separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
}},
})
}
@@ -141,7 +126,6 @@ type Options struct {
BearerToken string `config:"bearer_token"`
BearerTokenCommand string `config:"bearer_token_command"`
Enc encoder.MultiEncoder `config:"encoding"`
Headers fs.CommaSepList `config:"headers"`
}
// Fs represents a remote webdav
@@ -375,12 +359,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, err
}
if len(opt.Headers)%2 != 0 {
return nil, errors.New("odd number of headers supplied")
}
fs.Debugf(nil, "found headers: %v", opt.Headers)
rootIsDir := strings.HasSuffix(root, "/")
root = strings.Trim(root, "/")
@@ -450,9 +428,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}
}
if opt.Headers != nil {
f.addHeaders(opt.Headers)
}
f.srv.SetErrorHandler(errorHandler)
err = f.setQuirks(ctx, opt.Vendor)
if err != nil {
@@ -512,15 +487,6 @@ func (f *Fs) fetchBearerToken(cmd string) (string, error) {
return stdoutString, nil
}
// Adds the configured headers to the request if any
func (f *Fs) addHeaders(headers fs.CommaSepList) {
for i := 0; i < len(headers); i += 2 {
key := f.opt.Headers[i]
value := f.opt.Headers[i+1]
f.srv.SetHeader(key, value)
}
}
// fetch the bearer token and set it if successful
func (f *Fs) fetchAndSetBearerToken() error {
if f.opt.BearerTokenCommand == "" {

View File

@@ -1,74 +0,0 @@
package webdav_test
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/rclone/rclone/backend/webdav"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
remoteName = "TestWebDAV"
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)
// prepareServer prepares the test server and returns a function to tidy it up afterwards
// with each request the headers option tests are executed
func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server
fileServer := http.FileServer(http.Dir(""))
// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
fileServer.ServeHTTP(w, r)
})
// Make the test server
ts := httptest.NewServer(handler)
// Configure the remote
configfile.Install()
m := configmap.Simple{
"type": "webdav",
"url": ts.URL,
// add headers to test the headers option
"headers": strings.Join(headers, ","),
}
// return a function to tidy up
return m, ts.Close
}
// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
m, tidy := prepareServer(t)
// Instantiate the WebDAV server
f, err := webdav.NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
return f, tidy
}
// TestHeaders checks that the headers option is applied (any request will do)
func TestHeaders(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
// any request will do
_, err := f.Features().About(context.Background())
require.NoError(t, err)
}

View File

@@ -60,10 +60,12 @@ func init() {
Name: "yandex",
Description: "Yandex Disk",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
err := oauthutil.Config(ctx, "yandex", name, m, oauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: config.ConfigEncoding,

View File

@@ -72,97 +72,45 @@ func init() {
Name: "zoho",
Description: "Zoho",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
// Need to setup region before configuring oauth
err := setupRegion(m)
if err != nil {
return nil, err
return err
}
getSrvs := func() (authSrv, apiSrv *rest.Client, err error) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
opt := oauthutil.Options{
// No refresh token unless ApprovalForce is set
OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
}
if err := oauthutil.Config(ctx, "zoho", name, m, oauthConfig, &opt); err != nil {
return errors.Wrap(err, "failed to configure token")
}
// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
// its own custom type
token, err := oauthutil.GetToken(name, m)
if err != nil {
return errors.Wrap(err, "failed to read token")
}
if token.TokenType != "Zoho-oauthtoken" {
token.TokenType = "Zoho-oauthtoken"
err = oauthutil.PutToken(name, m, token, false)
if err != nil {
return nil, nil, errors.Wrap(err, "failed to load oAuthClient")
return errors.Wrap(err, "failed to configure token")
}
authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL)
apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL)
return authSrv, apiSrv, nil
}
switch config.State {
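// State flow (per the cases below): "" (the oauth step) -> "teams" -> "workspace" -> "workspace_end".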
case "":
return oauthutil.ConfigOut("teams", &oauthutil.Options{
OAuth2Config: oauthConfig,
// No refresh token unless ApprovalForce is set
OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
})
case "teams":
// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
// its own custom type
token, err := oauthutil.GetToken(name, m)
if err != nil {
return nil, errors.Wrap(err, "failed to read token")
}
if token.TokenType != "Zoho-oauthtoken" {
token.TokenType = "Zoho-oauthtoken"
err = oauthutil.PutToken(name, m, token, false)
if err != nil {
return nil, errors.Wrap(err, "failed to configure token")
}
}
authSrv, apiSrv, err := getSrvs()
if err != nil {
return nil, err
}
// Get the user Info
opts := rest.Opts{
Method: "GET",
Path: "/oauth/user/info",
}
var user api.User
_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
if err != nil {
return nil, err
}
// Get the teams
teams, err := listTeams(ctx, user.ZUID, apiSrv)
if err != nil {
return nil, err
}
return fs.ConfigChoose("workspace", "config_team_drive_id", "Team Drive ID", len(teams), func(i int) (string, string) {
team := teams[i]
return team.ID, team.Attributes.Name
})
case "workspace":
_, apiSrv, err := getSrvs()
if err != nil {
return nil, err
}
teamID := config.Result
workspaces, err := listWorkspaces(ctx, teamID, apiSrv)
if err != nil {
return nil, err
}
return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
workspace := workspaces[i]
return workspace.ID, workspace.Attributes.Name
})
case "workspace_end":
workspaceID := config.Result
m.Set(configRootID, workspaceID)
return nil, nil
if fs.GetConfig(ctx).AutoConfirm {
return nil
}
return nil, fmt.Errorf("unknown state %q", config.State)
if err = setupRoot(ctx, name, m); err != nil {
return errors.Wrap(err, "failed to configure root directory")
}
return nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "region",
Help: `Zoho region to connect to.
You'll have to use the region your organization is registered in. If
not sure, use the same top level domain as you connect to in your
browser.`,
Help: "Zoho region to connect to. You'll have to use the region you organization is registered in.",
Examples: []fs.OptionExample{{
Value: "com",
Help: "United states / Global",
@@ -261,6 +209,49 @@ func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]api
return workspaceList.TeamWorkspace, nil
}
func setupRoot(ctx context.Context, name string, m configmap.Mapper) error {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return errors.Wrap(err, "failed to load oAuthClient")
}
authSrv := rest.NewClient(oAuthClient).SetRoot(accountsURL)
opts := rest.Opts{
Method: "GET",
Path: "/oauth/user/info",
}
var user api.User
_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
if err != nil {
return err
}
apiSrv := rest.NewClient(oAuthClient).SetRoot(rootURL)
teams, err := listTeams(ctx, user.ZUID, apiSrv)
if err != nil {
return err
}
var teamIDs, teamNames []string
for _, team := range teams {
teamIDs = append(teamIDs, team.ID)
teamNames = append(teamNames, team.Attributes.Name)
}
teamID := config.Choose("Enter a Team Drive ID", teamIDs, teamNames, true)
workspaces, err := listWorkspaces(ctx, teamID, apiSrv)
if err != nil {
return err
}
var workspaceIDs, workspaceNames []string
for _, workspace := range workspaces {
workspaceIDs = append(workspaceIDs, workspace.ID)
workspaceNames = append(workspaceNames, workspace.Attributes.Name)
}
workspaceID := config.Choose("Enter a Workspace ID", workspaceIDs, workspaceNames, true)
m.Set(configRootID, workspaceID)
return nil
}
// --------------------------------------------------------------
// retryErrorCodes is a slice of error codes that we will retry

View File

@@ -1,5 +1,3 @@
# Email addresses to ignore in the git log when making the authors.md file
<nick@raig-wood.com>
<anaghk.dos@gmail.com>
<33207650+sp31415t1@users.noreply.github.com>
<unknown>

View File

@@ -1,203 +0,0 @@
#!/usr/bin/env python3
"""
Test program to demonstrate the remote config interfaces in
rclone.
This program can simulate
rclone config create
rclone config update
rclone config password - NOT implemented yet
rclone authorize - NOT implemented yet
Pass the desired action as the first argument then any parameters.
This assumes passwords will be passed in the clear.
"""
import argparse
import subprocess
import json
from pprint import pprint
sep = "-"*60
def rpc(args, command, params):
"""
Run the command. This could be either over the CLI or the API.
Here we run over the API either using `rclone rc --loopback` which
is useful for making sure state is saved properly or to an
existing rclone rcd if `--rc` is used on the command line.
"""
if args.rc:
import requests
kwargs = {
"json": params,
}
if args.user:
kwargs["auth"] = (args.user, args.password)
r = requests.post('http://localhost:5572/'+command, **kwargs)
if r.status_code != 200:
raise ValueError(f"RC command failed: Error {r.status_code}: {r.text}")
return r.json()
cmd = ["rclone", "-vv", "rc", "--loopback", command, "--json", json.dumps(params)]
result = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
return json.loads(result.stdout)
def parse_parameters(parameters):
"""
Parse the incoming key=value parameters into a dict
"""
d = {}
for param in parameters:
parts = param.split("=", 1)
if len(parts) != 2:
raise ValueError("bad format for parameter need name=value")
d[parts[0]] = parts[1]
return d
def ask(opt):
"""
Ask the user to enter the option
This is the user interface for asking a user a question.
If there are examples they should be presented.
"""
while True:
if opt["IsPassword"]:
print("*** Inputting a password")
print(opt['Help'])
examples = opt.get("Examples", ())
or_number = ""
if len(examples) > 0:
or_number = " or choice number"
for i, example in enumerate(examples):
print(f"{i:3} value: {example['Value']}")
print(f" help: {example['Help']}")
print(f"Enter a {opt['Type']} value{or_number}. Press Enter for the default ('{opt['DefaultStr']}')")
print(f"{opt['Name']}> ", end='')
s = input()
if s == "":
return opt["DefaultStr"]
try:
i = int(s)
if i >= 0 and i < len(examples):
return examples[i]["Value"]
except ValueError:
pass
if opt["Exclusive"]:
for example in examples:
if s == example["Value"]:
return s
# Exclusive is set but the value isn't one of the accepted
# ones so continue
print("Value isn't one of the acceptable values")
else:
return s
return s
def create_or_update(what, args):
"""
Run the equivalent of rclone config create
or rclone config update
what should either be "create" or "update"
"""
print(what, args)
params = parse_parameters(args.parameters)
inp = {
"name": args.name,
"parameters": params,
"opt": {
"nonInteractive": True,
"all": args.all,
"noObscure": args.obscured_passwords,
"obscure": not args.obscured_passwords,
},
}
if what == "create":
inp["type"] = args.type
while True:
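# Keep calling the config API, feeding State and the user's answer back in
# via "continue"/"state"/"result", until State comes back empty (meaning done).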
print(sep)
print("Input to API")
pprint(inp)
print(sep)
out = rpc(args, "config/"+what, inp)
print(sep)
print("Output from API")
pprint(out)
print(sep)
if out["State"] == "":
return
if out["Error"]:
print("Error", out["Error"])
result = ask(out["Option"])
inp["opt"]["state"] = out["State"]
inp["opt"]["result"] = result
inp["opt"]["continue"] = True
def create(args):
"""Run the equivalent of rclone config create"""
create_or_update("create", args)
def update(args):
"""Run the equivalent of rclone config update"""
create_or_update("update", args)
def password(args):
"""Run the equivalent of rclone config password"""
print("password", args)
raise NotImplementedError()
def authorize(args):
"""Run the equivalent of rclone authorize"""
print("authorize", args)
raise NotImplementedError()
def main():
"""
Make the command line parser and dispatch
"""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("-a", "--all", action='store_true',
help="Ask all the config questions if set")
parser.add_argument("-o", "--obscured-passwords", action='store_true',
help="If set assume the passwords are obscured")
parser.add_argument("--rc", action='store_true',
help="If set use the rc (you'll need to start an rclone rcd)")
parser.add_argument("--user", type=str, default="",
help="Username for use with --rc")
parser.add_argument("--pass", type=str, default="", dest='password',
help="Password for use with --rc")
subparsers = parser.add_subparsers(dest='command', required=True)
subparser = subparsers.add_parser('create')
subparser.add_argument("name", type=str, help="Name of remote to create")
subparser.add_argument("type", type=str, help="Type of remote to create")
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
subparser.set_defaults(func=create)
subparser = subparsers.add_parser('update')
subparser.add_argument("name", type=str, help="Name of remote to update")
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
subparser.set_defaults(func=update)
subparser = subparsers.add_parser('password')
subparser.add_argument("name", type=str, help="Name of remote to update")
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
subparser.set_defaults(func=password)
subparser = subparsers.add_parser('authorize')
subparser.set_defaults(func=authorize)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()

View File

@@ -23,7 +23,6 @@ docs = [
"rc.md",
"overview.md",
"flags.md",
"docker.md",
# Keep these alphabetical by full name
"fichier.md",

View File

@@ -10,7 +10,6 @@ import (
_ "github.com/rclone/rclone/cmd/cachestats"
_ "github.com/rclone/rclone/cmd/cat"
_ "github.com/rclone/rclone/cmd/check"
_ "github.com/rclone/rclone/cmd/checksum"
_ "github.com/rclone/rclone/cmd/cleanup"
_ "github.com/rclone/rclone/cmd/cmount"
_ "github.com/rclone/rclone/cmd/config"
@@ -19,6 +18,7 @@ import (
_ "github.com/rclone/rclone/cmd/copyurl"
_ "github.com/rclone/rclone/cmd/cryptcheck"
_ "github.com/rclone/rclone/cmd/cryptdecode"
_ "github.com/rclone/rclone/cmd/dbhashsum"
_ "github.com/rclone/rclone/cmd/dedupe"
_ "github.com/rclone/rclone/cmd/delete"
_ "github.com/rclone/rclone/cmd/deletefile"

View File

@@ -2,7 +2,6 @@ package check
import (
"context"
"fmt"
"io"
"os"
"strings"
@@ -10,7 +9,6 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -18,22 +16,20 @@ import (
// Globals
var (
download = false
oneway = false
combined = ""
missingOnSrc = ""
missingOnDst = ""
match = ""
differ = ""
errFile = ""
checkFileHashType = ""
download = false
oneway = false
combined = ""
missingOnSrc = ""
missingOnDst = ""
match = ""
differ = ""
errFile = ""
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
flags.StringVarP(cmdFlags, &checkFileHashType, "checkfile", "C", checkFileHashType, "Treat source:path as a SUM file with hashes of given type")
AddFlags(cmdFlags)
}
@@ -129,6 +125,7 @@ func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err
}
return opt, close, nil
}
var commandDefinition = &cobra.Command{
@@ -146,50 +143,20 @@ If you supply the |--download| flag, it will download the data from
both remotes and check them against each other on the fly. This can
be useful for remotes that don't support hashes or if you really want
to check all the data.
If you supply the |--checkfile HASH| flag with a valid hash name,
the |source:path| must point to a text file in the SUM format.
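For example (a sketch, assuming an MD5 SUM file stored at remote:src/MD5SUMS):
rclone check --checkfile md5 remote:src/MD5SUMS remote:dst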
`, "|", "`") + FlagsHelp,
RunE: func(command *cobra.Command, args []string) error {
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
var (
fsrc, fdst fs.Fs
hashType hash.Type
fsum fs.Fs
sumFile string
)
if checkFileHashType != "" {
if err := hashType.Set(checkFileHashType); err != nil {
fmt.Println(hash.HelpString(0))
return err
}
fsum, sumFile, fsrc = cmd.NewFsSrcFileDst(args)
} else {
fsrc, fdst = cmd.NewFsSrcDst(args)
}
fsrc, fdst := cmd.NewFsSrcDst(args)
cmd.Run(false, true, command, func() error {
opt, close, err := GetCheckOpt(fsrc, fdst)
if err != nil {
return err
}
defer close()
if checkFileHashType != "" {
return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, hashType, opt, download)
}
if download {
return operations.CheckDownload(context.Background(), opt)
}
hashType := fsrc.Hashes().Overlap(fdst.Hashes()).GetOne()
if hashType == hash.None {
fs.Errorf(nil, "No common hash found - not using a hash for checks")
} else {
fs.Infof(nil, "Using %v for hash comparisons", hashType)
}
return operations.Check(context.Background(), opt)
})
return nil
},
}

View File

@@ -1,57 +0,0 @@
package checksum
import (
"context"
"fmt"
"strings"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/check" // for common flags
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
var download = false
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by hashing the contents.")
check.AddFlags(cmdFlags)
}
var commandDefinition = &cobra.Command{
Use: "checksum <hash> sumfile src:path",
Short: `Checks the files in the source against a SUM file.`,
Long: strings.ReplaceAll(`
Checks that hashsums of source files match the SUM file.
It compares hashes (MD5, SHA1, etc) and logs a report of files which
don't match. It doesn't alter the file system.
If you supply the |--download| flag, it will download the data from remote
and calculate the contents hash on the fly. This can be useful for remotes
that don't support hashes or if you really want to check all the data.
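For example (a sketch, assuming a SHA1 SUM file named SHA1SUMS in the current directory):
rclone checksum sha1 SHA1SUMS remote:path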
`, "|", "`") + check.FlagsHelp,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(3, 3, command, args)
var hashType hash.Type
if err := hashType.Set(args[0]); err != nil {
fmt.Println(hash.HelpString(0))
return err
}
fsum, sumFile, fsrc := cmd.NewFsSrcFileDst(args[1:])
cmd.Run(false, true, command, func() error {
opt, close, err := check.GetCheckOpt(nil, fsrc)
if err != nil {
return err
}
defer close()
return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, hashType, opt, download)
})
return nil
},
}

View File

@@ -37,7 +37,6 @@ import (
"github.com/rclone/rclone/fs/rc/rcserver"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/buildinfo"
"github.com/rclone/rclone/lib/exitcode"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/terminal"
"github.com/spf13/cobra"
@@ -61,6 +60,19 @@ var (
errorTooManyArguments = errors.New("too many arguments")
)
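// Numeric exit codes handed back to the calling shell via os.Exit in resolveExitCode below.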
const (
exitCodeSuccess = iota
exitCodeUsageError
exitCodeUncategorizedError
exitCodeDirNotFound
exitCodeFileNotFound
exitCodeRetryError
exitCodeNoRetryError
exitCodeFatalError
exitCodeTransferExceeded
exitCodeNoFilesTransferred
)
// ShowVersion prints the version to stdout
func ShowVersion() {
osVersion, osKernel := buildinfo.GetOSVersion()
@@ -472,31 +484,31 @@ func resolveExitCode(err error) {
if err == nil {
if ci.ErrorOnNoTransfer {
if accounting.GlobalStats().GetTransfers() == 0 {
os.Exit(exitcode.NoFilesTransferred)
os.Exit(exitCodeNoFilesTransferred)
}
}
os.Exit(exitcode.Success)
os.Exit(exitCodeSuccess)
}
_, unwrapped := fserrors.Cause(err)
switch {
case unwrapped == fs.ErrorDirNotFound:
os.Exit(exitcode.DirNotFound)
os.Exit(exitCodeDirNotFound)
case unwrapped == fs.ErrorObjectNotFound:
os.Exit(exitcode.FileNotFound)
os.Exit(exitCodeFileNotFound)
case unwrapped == errorUncategorized:
os.Exit(exitcode.UncategorizedError)
os.Exit(exitCodeUncategorizedError)
case unwrapped == accounting.ErrorMaxTransferLimitReached:
os.Exit(exitcode.TransferExceeded)
os.Exit(exitCodeTransferExceeded)
case fserrors.ShouldRetry(err):
os.Exit(exitcode.RetryError)
os.Exit(exitCodeRetryError)
case fserrors.IsNoRetryError(err):
os.Exit(exitcode.NoRetryError)
os.Exit(exitCodeNoRetryError)
case fserrors.IsFatalError(err):
os.Exit(exitcode.FatalError)
os.Exit(exitCodeFatalError)
default:
os.Exit(exitcode.UsageError)
os.Exit(exitCodeUsageError)
}
}
@@ -527,8 +539,7 @@ func AddBackendFlags() {
if opt.IsPassword {
help += " (obscured)"
}
flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
flags.SetDefaultFromEnv(pflag.CommandLine, name)
flag := flags.VarPF(pflag.CommandLine, opt, name, opt.ShortOpt, help)
if _, isBool := opt.Default.(bool); isBool {
flag.NoOptDefVal = "true"
}

View File

@@ -105,14 +105,12 @@ var configProvidersCommand = &cobra.Command{
},
}
var updateRemoteOpt config.UpdateRemoteOpt
var configPasswordHelp = strings.ReplaceAll(`
Note that if the config process would normally ask a question the
default is taken (unless |--non-interactive| is used). Each time
that happens rclone will print or DEBUG a message saying how to
affect the value taken.
var (
configObscure bool
configNoObscure bool
)
const configPasswordHelp = `
If any of the parameters passed is a password field, then rclone will
automatically obscure it if it isn't already obscured before
putting it in the config file.
@@ -121,170 +119,84 @@ putting them in the config file.
consists only of base64 characters then rclone can get confused about
whether the password is already obscured or not and put unobscured
passwords into the config file. If you want to be 100% certain that
the passwords get obscured then use the |--obscure| flag, or if you
the passwords get obscured then use the "--obscure" flag, or if you
are 100% certain you are already passing obscured passwords then use
|--no-obscure|. You can also set obscured passwords using the
|rclone config password| command.
"--no-obscure". You can also set obscured passwords using the
"rclone config password" command.
`
The flag |--non-interactive| is for use by applications that wish to
configure rclone themselves, rather than using rclone's text-based
configuration questions. If this flag is set, and rclone needs to ask
the user a question, a JSON blob will be returned with the question in
it.
This will look something like (some irrelevant detail removed):
|||
{
"State": "*oauth-islocal,teamdrive,,",
"Option": {
"Name": "config_is_local",
"Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n",
"Default": true,
"Examples": [
{
"Value": "true",
"Help": "Yes"
},
{
"Value": "false",
"Help": "No"
}
],
"Required": false,
"IsPassword": false,
"Type": "bool",
"Exclusive": true,
},
"Error": "",
}
|||
The format of |Option| is the same as returned by |rclone config
providers|. The question should be asked to the user and returned to
rclone as the |--result| option along with the |--state| parameter.
The keys of |Option| are used as follows:
- |Name| - name of variable - show to user
- |Help| - help text. Hard wrapped at 80 chars. Any URLs should be clicky.
- |Default| - default value - return this if the user just wants the default.
- |Examples| - the user should be able to choose one of these
- |Required| - the value should be non-empty
- |IsPassword| - the value is a password and should be edited as such
- |Type| - type of value, e.g. |bool|, |string|, |int| and others
- |Exclusive| - if set no free-form entry allowed only the |Examples|
- Irrelevant keys |Provider|, |ShortOpt|, |Hide|, |NoPrefix|, |Advanced|
If |Error| is set then it should be shown to the user at the same
time as the question.
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
Note that when using |--continue| all passwords should be passed in
the clear (not obscured). Any default config values should be passed
in with each invocation of |--continue|.
At the end of the non-interactive process, rclone will return a result
with |State| as empty string.
If |--all| is passed then rclone will ask all the config questions,
not just the post config questions. Any parameters are used as
defaults for questions as usual.
Note that |bin/config.py| in the rclone source implements this protocol
as a readable demonstration.
`, "|", "`")
var configCreateCommand = &cobra.Command{
Use: "create `name` `type` [`key` `value`]*",
Short: `Create a new remote with name, type and options.`,
Long: strings.ReplaceAll(`
Create a new remote of |name| with |type| and options. The options
should be passed in pairs of |key| |value| or as |key=value|.
Long: `
Create a new remote of ` + "`name`" + ` with ` + "`type`" + ` and options. The options
should be passed in pairs of ` + "`key` `value`" + `.
For example to make a swift remote of name myremote using auto config
you would do:
rclone config create myremote swift env_auth true
rclone config create myremote swift env_auth=true
Note that if the config process would normally ask a question the
default is taken. Each time that happens rclone will print a message
saying how to affect the value taken.
` + configPasswordHelp + `
So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this:
rclone config create mydrive drive config_is_local=false
`, "|", "`") + configPasswordHelp,
rclone config create mydrive drive config_is_local false
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 256, command, args)
in, err := argsToMap(args[2:])
if err != nil {
return err
}
return doConfig(args[0], in, func(opts config.UpdateRemoteOpt) (*fs.ConfigOut, error) {
return config.CreateRemote(context.Background(), args[0], args[1], in, opts)
})
},
}
func doConfig(name string, in rc.Params, do func(config.UpdateRemoteOpt) (*fs.ConfigOut, error)) error {
out, err := do(updateRemoteOpt)
if err != nil {
return err
}
if !(updateRemoteOpt.NonInteractive || updateRemoteOpt.Continue) {
config.ShowRemote(name)
} else {
if out == nil {
out = &fs.ConfigOut{}
}
outBytes, err := json.MarshalIndent(out, "", "\t")
err = config.CreateRemote(context.Background(), args[0], args[1], in, configObscure, configNoObscure)
if err != nil {
return err
}
_, _ = os.Stdout.Write(outBytes)
_, _ = os.Stdout.WriteString("\n")
}
return nil
config.ShowRemote(args[0])
return nil
},
}
func init() {
for _, cmdFlags := range []*pflag.FlagSet{configCreateCommand.Flags(), configUpdateCommand.Flags()} {
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured.")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured.")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions.")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer.")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions.")
flags.StringVarP(cmdFlags, &updateRemoteOpt.State, "state", "", "", "State - use with --continue.")
flags.StringVarP(cmdFlags, &updateRemoteOpt.Result, "result", "", "", "Result - use with --continue.")
flags.BoolVarP(cmdFlags, &configObscure, "obscure", "", false, "Force any passwords to be obscured.")
flags.BoolVarP(cmdFlags, &configNoObscure, "no-obscure", "", false, "Force any passwords not to be obscured.")
}
}
var configUpdateCommand = &cobra.Command{
Use: "update `name` [`key` `value`]+",
Short: `Update options in an existing remote.`,
Long: strings.ReplaceAll(`
Long: `
Update an existing remote's options. The options should be passed in
pairs of |key| |value| or as |key=value|.
in pairs of ` + "`key` `value`" + `.
For example to update the env_auth field of a remote of name myremote
you would do:
rclone config update myremote env_auth true
rclone config update myremote env_auth=true
rclone config update myremote swift env_auth true
` + configPasswordHelp + `
If the remote uses OAuth the token will be updated, if you don't
require this add an extra parameter thus:
rclone config update myremote env_auth=true config_refresh_token=false
`, "|", "`") + configPasswordHelp,
rclone config update myremote swift env_auth true config_refresh_token false
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 256, command, args)
cmd.CheckArgs(3, 256, command, args)
in, err := argsToMap(args[1:])
if err != nil {
return err
}
return doConfig(args[0], in, func(opts config.UpdateRemoteOpt) (*fs.ConfigOut, error) {
return config.UpdateRemote(context.Background(), args[0], in, opts)
})
err = config.UpdateRemote(context.Background(), args[0], in, configObscure, configNoObscure)
if err != nil {
return err
}
config.ShowRemote(args[0])
return nil
},
}
@@ -300,21 +212,19 @@ var configDeleteCommand = &cobra.Command{
var configPasswordCommand = &cobra.Command{
Use: "password `name` [`key` `value`]+",
Short: `Update password in an existing remote.`,
Long: strings.ReplaceAll(`
Long: `
Update an existing remote's password. The password
should be passed in pairs of |key| |password| or as |key=password|.
The |password| should be passed in in the clear (unobscured).
should be passed in pairs of ` + "`key` `value`" + `.
For example to set password of a remote of name myremote you would do:
rclone config password myremote fieldname mypassword
rclone config password myremote fieldname=mypassword
This command is obsolete now that "config update" and "config create"
both support obscuring passwords directly.
`, "|", "`"),
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 256, command, args)
cmd.CheckArgs(3, 256, command, args)
in, err := argsToMap(args[1:])
if err != nil {
return err
@@ -328,24 +238,16 @@ both support obscuring passwords directly.
},
}
// This takes a list of arguments in key value key value form, or
// key=value key=value form and converts it into a map
// This takes a list of arguments in key value key value form and
// converts it into a map
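// For example, ["user", "admin", "pass=secret"] becomes {"user": "admin", "pass": "secret"}.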
func argsToMap(args []string) (out rc.Params, err error) {
if len(args)%2 != 0 {
return nil, errors.New("found key without value")
}
out = rc.Params{}
for i := 0; i < len(args); i++ {
key := args[i]
equals := strings.IndexRune(key, '=')
var value string
if equals >= 0 {
key, value = key[:equals], key[equals+1:]
} else {
i++
if i >= len(args) {
return nil, errors.New("found key without value")
}
value = args[i]
}
out[key] = value
// Set the config
for i := 0; i < len(args); i += 2 {
out[args[i]] = args[i+1]
}
return out, nil
}
@@ -363,11 +265,14 @@ This normally means going through the interactive oauth flow again.
RunE: func(command *cobra.Command, args []string) error {
ctx := context.Background()
cmd.CheckArgs(1, 1, command, args)
fsInfo, configName, _, m, err := fs.ConfigFs(args[0])
fsInfo, configName, _, config, err := fs.ConfigFs(args[0])
if err != nil {
return err
}
return config.PostConfig(ctx, configName, m, fsInfo)
if fsInfo.Config == nil {
return errors.Errorf("%s: doesn't support Reconnect", configName)
}
return fsInfo.Config(ctx, configName, config)
},
}

View File

@@ -1,59 +0,0 @@
package config
import (
"fmt"
"testing"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
)
func TestArgsToMap(t *testing.T) {
for _, test := range []struct {
args []string
want rc.Params
wantErr bool
}{
{
args: []string{},
want: rc.Params{},
},
{
args: []string{"hello", "42"},
want: rc.Params{"hello": "42"},
},
{
args: []string{"hello", "42", "bye", "43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello=42", "bye", "43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello", "42", "bye=43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello=42", "bye=43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello", "42", "bye", "43", "unused"},
wantErr: true,
},
{
args: []string{"hello=42", "bye=43", "unused"},
wantErr: true,
},
} {
what := fmt.Sprintf("args = %#v", test.args)
got, err := argsToMap(test.args)
if test.wantErr {
assert.Error(t, err, what)
} else {
assert.NoError(t, err, what)
assert.Equal(t, test.want, got, what)
}
}
}

View File

@@ -37,8 +37,8 @@ Download a URL's content and copy it to the destination without saving
it in temporary storage.
Setting ` + "`--auto-filename`" + ` will cause the file name to be retrieved from
the URL (after any redirections) and used in the destination
path. With ` + "`--print-filename`" + ` in addition, the resulting file name will
the URL (after any redirections) and used in the destination
path. With ` + "`--print-filename`" + ` in addition, the resulting file name will
be printed.
Setting ` + "`--no-clobber`" + ` will prevent overwriting file on the

View File

@@ -0,0 +1,51 @@
package dbhashsum
import (
"context"
"github.com/rclone/rclone/backend/dropbox"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/hashsum"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
hashsum.AddHashFlags(cmdFlags)
}
var commandDefinition = &cobra.Command{
Use: "dbhashsum remote:path",
Short: `Produces a Dropbox hash file for all the objects in the path.`,
Long: `
Produces a Dropbox hash file for all the objects in the path. The
hashes are calculated according to [Dropbox content hash
rules](https://www.dropbox.com/developers/reference/content-hash).
The output is in the same format as md5sum and sha1sum.
By default, the hash is requested from the remote. If Dropbox hash is
not supported by the remote, no hash will be returned. With the
download flag, the file will be downloaded from the remote and
hashed locally enabling Dropbox hash for any remote.
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
fs.Logf(nil, `"rclone dbhashsum" is deprecated, use "rclone hashsum %v %s" instead`, dropbox.DbHashType, args[0])
cmd.Run(false, false, command, func() error {
if hashsum.HashsumOutfile == "" {
return operations.HashLister(context.Background(), dropbox.DbHashType, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, nil)
}
output, close, err := hashsum.GetHashsumOutput(hashsum.HashsumOutfile)
if err != nil {
return err
}
defer close()
return operations.HashLister(context.Background(), dropbox.DbHashType, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, output)
})
},
}

View File

@@ -15,12 +15,11 @@ import (
"github.com/spf13/pflag"
)
// Global hashsum flags for reuse in hashsum, md5sum, sha1sum
// Global hashsum flags for reuse in md5sum, sha1sum, and dbhashsum
var (
OutputBase64 = false
DownloadFlag = false
HashsumOutfile = ""
ChecksumFile = ""
)
func init() {
@@ -29,11 +28,10 @@ func init() {
AddHashFlags(cmdFlags)
}
// AddHashFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum
// AddHashFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum, and dbhashsum
func AddHashFlags(cmdFlags *pflag.FlagSet) {
flags.BoolVarP(cmdFlags, &OutputBase64, "base64", "", OutputBase64, "Output base64 encoded hashsum")
flags.StringVarP(cmdFlags, &HashsumOutfile, "output-file", "", HashsumOutfile, "Output hashsums to a file rather than the terminal")
flags.StringVarP(cmdFlags, &ChecksumFile, "checkfile", "C", ChecksumFile, "Validate hashes against a given SUM file instead of printing them")
flags.BoolVarP(cmdFlags, &DownloadFlag, "download", "", DownloadFlag, "Download the file and hash it locally; if this flag is not specified, the hash is requested from the remote")
}
@@ -71,17 +69,23 @@ hashed locally enabling any hash for any remote.
Run without a hash to see the list of all supported hashes, e.g.
$ rclone hashsum
` + hash.HelpString(4) + `
Supported hashes are:
* MD5
* SHA-1
* DropboxHash
* QuickXorHash
Then
$ rclone hashsum MD5 remote:path
Note that hash names are case insensitive.
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 2, command, args)
if len(args) == 0 {
fmt.Print(hash.HelpString(0))
fmt.Printf("Supported hashes are:\n")
for _, ht := range hash.Supported().Array() {
fmt.Printf(" * %v\n", ht)
}
return nil
} else if len(args) == 1 {
return errors.New("need hash type and remote")
@@ -89,16 +93,11 @@ Note that hash names are case insensitive.
var ht hash.Type
err := ht.Set(args[0])
if err != nil {
fmt.Println(hash.HelpString(0))
return err
}
fsrc := cmd.NewFsSrc(args[1:])
cmd.Run(false, false, command, func() error {
if ChecksumFile != "" {
fsum, sumFile := cmd.NewFsFile(ChecksumFile)
return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, ht, nil, DownloadFlag)
}
if HashsumOutfile == "" {
return operations.HashLister(context.Background(), ht, OutputBase64, DownloadFlag, fsrc, nil)
}

View File

@@ -32,10 +32,6 @@ hashed locally enabling MD5 for any remote.
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
if hashsum.ChecksumFile != "" {
fsum, sumFile := cmd.NewFsFile(hashsum.ChecksumFile)
return operations.CheckSum(context.Background(), fsrc, fsum, sumFile, hash.MD5, nil, hashsum.DownloadFlag)
}
if hashsum.HashsumOutfile == "" {
return operations.HashLister(context.Background(), hash.MD5, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, nil)
}

View File

@@ -1,6 +1,6 @@
// Daemonization interface for non-Unix variants only
// +build windows plan9 js
// +build windows
package mountlib

View File

@@ -1,6 +1,6 @@
// Daemonization interface for Unix variants only
// +build !windows,!plan9,!js
// +build !windows
package mountlib

View File

@@ -1,302 +0,0 @@
package mountlib
// "@" will be replaced by the command name, "|" will be replaced by backticks
var mountHelp = `
rclone @ allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with
FUSE.
First set up your remote using |rclone config|. Check it works with |rclone ls| etc.
On Linux and OSX, you can either run mount in foreground mode or background (daemon) mode.
Mount runs in foreground mode by default; use the |--daemon| flag to specify background mode.
You can only run mount in foreground mode on Windows.
On Linux/macOS/FreeBSD start the mount like this, where |/path/to/local/mount|
is an **empty** **existing** directory:
rclone @ remote:path/to/files /path/to/local/mount
On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
for details. The following examples will mount to an automatically assigned drive,
to a specific drive letter |X:|, to the path |C:\path\parent\mount|
(where parent directory or drive must exist, and mount must **not** exist,
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
the last example will mount as network share |\\cloud\remote| and map it to an
automatically assigned drive:
rclone @ remote:path/to/files *
rclone @ remote:path/to/files X:
rclone @ remote:path/to/files C:\path\parent\mount
rclone @ remote:path/to/files \\cloud\remote
When the program ends while in foreground mode, either via Ctrl+C or receiving
a SIGINT or SIGTERM signal, the mount should be automatically stopped.
When running in background mode the user will have to stop the mount manually:
# Linux
fusermount -u /path/to/local/mount
# OS X
umount /path/to/local/mount
The umount operation can fail, for example when the mountpoint is busy.
When that happens, it is the user's responsibility to stop the mount manually.
The size of the mounted file system will be set according to information retrieved
from the remote, the same as returned by the [rclone about](https://rclone.org/commands/rclone_about/)
command. Remotes with unlimited storage may report the used size only;
in that case an additional 1 PiB of free space is assumed. If the remote does not
[support](https://rclone.org/overview/#optional-features) the about feature
at all, then 1 PiB is set as both the total and the free size.
**Note**: As of |rclone| 1.52.2, |rclone mount| now requires Go version 1.13
or newer on some platforms depending on the underlying FUSE library in use.
### Installing on Windows
To run rclone @ on Windows, you will need to
download and install [WinFsp](http://www.secfs.net/winfsp/).
[WinFsp](https://github.com/billziss-gh/winfsp) is an open source
Windows File System Proxy which makes it easy to write user space file
systems for Windows. It provides a FUSE emulation layer which rclone
uses in combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
Both of these packages are by Bill Zissimopoulos who was very helpful
during the implementation of rclone @ for Windows.
#### Mounting modes on Windows
Unlike other operating systems, Microsoft Windows provides a different filesystem
type for network and fixed drives. It optimises access on the assumption that fixed
disk drives are fast and reliable, while network drives have relatively high latency
and less reliability. Some settings can also be differentiated between the two types,
for example that Windows Explorer should just display icons and not create preview
thumbnails for image and video files on network drives.
In most cases, rclone will mount the remote as a normal, fixed disk drive by default.
However, you can also choose to mount it as a remote network drive, often described
as a network share. If you mount an rclone remote using the default, fixed drive mode
and experience unexpected program errors, freezes or other issues, consider mounting
as a network drive instead.
When mounting as a fixed disk drive you can either mount to an unused drive letter,
or to a path representing a **non-existent** subdirectory of an **existing** parent
directory or drive. Using the special value |*| will tell rclone to
automatically assign the next available drive letter, starting with Z: and moving backward.
Examples:
rclone @ remote:path/to/files *
rclone @ remote:path/to/files X:
rclone @ remote:path/to/files C:\path\parent\mount
Option |--volname| can be used to set a custom volume name for the mounted
file system. The default is to use the remote name and path.
To mount as network drive, you can add option |--network-mode|
to your @ command. Mounting to a directory path is not supported in
this mode; it is a limitation Windows imposes on junctions, so the remote must always
be mounted to a drive letter.
rclone @ remote:path/to/files X: --network-mode
A volume name specified with |--volname| will be used to create the network share path.
A complete UNC path, such as |\\cloud\remote|, optionally with path
|\\cloud\remote\madeup\path|, will be used as is. Any other
string will be used as the share part, after a default prefix |\\server\|.
If no volume name is specified then |\\server\share| will be used.
You must make sure the volume name is unique when you are mounting more than one drive,
or else the mount command will fail. The share name will be treated as the volume label for
the mapped drive, shown in Windows Explorer etc, while the complete
|\\server\share| will be reported as the remote UNC path by
|net use| etc, just like a normal network drive mapping.
If you specify a full network share UNC path with |--volname|, this will implicitly
set the |--network-mode| option, so the following two examples have the same result:
rclone @ remote:path/to/files X: --network-mode
rclone @ remote:path/to/files X: --volname \\server\share
You may also specify the network share UNC path as the mountpoint itself. Then rclone
will automatically assign a drive letter, same as with |*|, and use that as the
mountpoint, and instead use the UNC path specified as the volume name, as if it were
specified with the |--volname| option. This will also implicitly set
the |--network-mode| option. This means the following two examples have the same result:
rclone @ remote:path/to/files \\cloud\remote
rclone @ remote:path/to/files * --volname \\cloud\remote
There is yet another way to enable network mode, and to set the share path,
and that is to pass the "native" libfuse/WinFsp option directly:
|--fuse-flag --VolumePrefix=\server\share|. Note that in this case the path
must have just a single backslash prefix.
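For example, to mount to drive |X:| with the share path set this way:

    rclone @ remote:path/to/files X: --fuse-flag --VolumePrefix=\server\share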
*Note:* In previous versions of rclone this was the only supported method.
[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
See also [Limitations](#limitations) section below.
#### Windows filesystem permissions
The FUSE emulation layer on Windows must convert between the POSIX-based
permission model used in FUSE, and the permission model used in Windows,
based on access-control lists (ACL).
The mounted filesystem will normally get three entries in its access-control list,
representing permissions for the POSIX permission scopes: Owner, group and others.
By default, the owner and group will be taken from the current user, and the built-in
group "Everyone" will be used to represent others. The user/group can be customized
with FUSE options "UserName" and "GroupName",
e.g. |-o UserName=user123 -o GroupName="Authenticated Users"|.
The permissions on each entry will be set according to
[options](#options) |--dir-perms| and |--file-perms|,
which take a value in traditional [numeric notation](https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation),
where the default corresponds to |--file-perms 0666 --dir-perms 0777|.
Note that the mapping of permissions is not always trivial, and the result
you see in Windows Explorer may not be exactly what you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes". Windows will then show this as basic
permission "Special" instead of "Write", because "Write" includes the
"write extended attributes" permission.
If you set POSIX permissions for only allowing access to the owner, using
|--file-perms 0600 --dir-perms 0700|, the user group and the built-in "Everyone"
group will still be given some special permissions, such as "read attributes"
and "read permissions", in Windows. This is done for compatibility reasons,
e.g. to allow users without additional permissions to be able to read basic
metadata about files like in UNIX. One case that may arise is that other programs
(incorrectly) interprets this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".
WinFsp 2021 (version 1.9) introduces a new FUSE option "FileSecurity"
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you can work around issues such as the mentioned "unprotected private key file"
by specifying |-o FileSecurity="D:P(A;;FA;;;OW)"|, for file all access (FA) to the owner (OW).
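A complete mount command using this option could look like this:

    rclone @ remote:path/to/files X: -o FileSecurity="D:P(A;;FA;;;OW)"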
#### Windows caveats
Drives created as Administrator are not visible to other accounts,
not even an account that was elevated to Administrator with the
User Account Control (UAC) feature. A result of this is that if you mount
to a drive letter from a Command Prompt run as Administrator, and then try
to access the same drive from Windows Explorer (which does not run as
Administrator), you will not be able to see the mounted drive.
If you don't need to access the drive from applications running with
administrative privileges, the easiest way around this is to always
create the mount from a non-elevated command prompt.
To make mapped drives available to the user account that created them
regardless of whether it is elevated or not, there is a special Windows setting called
[linked connections](https://docs.microsoft.com/en-us/troubleshoot/windows-client/networking/mapped-drives-not-available-from-elevated-command#detail-to-configure-the-enablelinkedconnections-registry-entry)
that can be enabled.
It is also possible to make a drive mount available to everyone on the system,
by running the process creating it as the built-in SYSTEM account.
There are several ways to do this: One is to use the command-line
utility [PsExec](https://docs.microsoft.com/en-us/sysinternals/downloads/psexec),
from Microsoft's Sysinternals suite, which has option |-s| to start
processes as the SYSTEM account. Another alternative is to run the mount
command from a Windows Scheduled Task, or a Windows Service, configured
to run as the SYSTEM account. A third alternative is to use the
[WinFsp.Launcher infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture).
Note that when running rclone as another user, it will not use
the configuration file from your profile unless you tell it to
with the [|--config|](https://rclone.org/docs/#config-config-file) option.
Read more in the [install documentation](https://rclone.org/install/).
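For example, to run a mount with an explicit configuration file (the path below is just an illustration):

    rclone @ remote:path/to/files X: --config C:\path\to\rclone.conf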
Note that mapping to a directory path, instead of a drive letter,
does not suffer from the same limitations.
### Limitations
Without the use of |--vfs-cache-mode| this can only write files
sequentially, and can only seek when reading. This means that many
applications won't work with their files on an rclone mount without
|--vfs-cache-mode writes| or |--vfs-cache-mode full|.
See the [VFS File Caching](#vfs-file-caching) section for more info.
The bucket-based remotes (e.g. Swift, S3, Google Cloud Storage, B2,
Hubic) do not support the concept of empty directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.
Only supported on Linux, FreeBSD, OS X and Windows at the moment.
### rclone @ vs rclone sync/copy
File systems expect things to be 100% reliable, whereas cloud storage
systems are a long way from 100% reliable. The rclone sync/copy
commands cope with this with lots of retries. However, rclone @
can't use retries in the same way without making local copies of the
uploads. Look at the [VFS File Caching](#vfs-file-caching) section
for solutions to make @ more reliable.
### Attribute caching
You can use the flag |--attr-timeout| to set the time the kernel caches
the attributes (size, modification time, etc.) for directory entries.
The default is |1s| which caches files just long enough to avoid
too many callbacks to rclone from the kernel.
In theory 0s should be the correct value for filesystems which can
change outside the control of the kernel. However this causes quite a
few problems such as
[rclone using too much memory](https://github.com/rclone/rclone/issues/2157),
[rclone not serving files to samba](https://forum.rclone.org/t/rclone-1-39-vs-1-40-mount-issue/5112)
and [excessive time listing directories](https://github.com/rclone/rclone/issues/2095#issuecomment-371141147).
The kernel can cache the info about a file for the time given by
|--attr-timeout|. You may see corruption if the remote file changes
length during this window. It will show up as either a truncated file
or a file with garbage on the end. With |--attr-timeout 1s| this is
very unlikely but not impossible. The higher you set |--attr-timeout|
the more likely it is. The default setting of "1s" is the lowest
setting which mitigates the problems above.
If you set it higher (|10s| or |1m|, say) then the kernel will call
back to rclone less often, making it more efficient; however, there is
more chance of the corruption issue above.
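For example:

    rclone @ remote:path/to/files /path/to/local/mount --attr-timeout 10s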
If files don't change on the remote outside of the control of rclone
then there is no chance of corruption.
This is the same as setting the attr_timeout option in mount.fuse.
### Filters
Note that all the rclone filters can be used to select a subset of the
files to be visible in the mount.
### systemd
When running rclone @ as a systemd service, it is possible
to use Type=notify. In this case the service will enter the started state
after the mountpoint has been successfully set up.
Units having the rclone @ service specified as a requirement
will see all files and folders immediately in this mode.
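A minimal sketch of such a unit could look like the following (the paths and remote are illustrative):

    [Unit]
    Description=Example rclone mount
    [Service]
    Type=notify
    ExecStart=/usr/bin/rclone @ remote:path/to/files /path/to/local/mount
    ExecStop=/bin/fusermount -u /path/to/local/mount
    [Install]
    WantedBy=multi-user.target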
### chunked reading
|--vfs-read-chunk-size| will enable reading the source objects in parts.
This can reduce the used download quota for some remotes by requesting only chunks
from the remote that are actually read at the cost of an increased number of requests.
When |--vfs-read-chunk-size-limit| is also specified and greater than
|--vfs-read-chunk-size|, the chunk size for each open file will get doubled
for each chunk read, until the specified value is reached. A value of |-1| will disable
the limit and the chunk size will grow indefinitely.
With |--vfs-read-chunk-size 100M| and |--vfs-read-chunk-size-limit 0|
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When |--vfs-read-chunk-size-limit 500M| is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
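For example, the last case above corresponds to a mount like:

    rclone @ remote:path/to/files /path/to/local/mount --vfs-read-chunk-size 100M --vfs-read-chunk-size-limit 500M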
`

View File

@@ -1,14 +1,19 @@
package mountlib
import (
"io"
"log"
"os"
"os/signal"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"time"
sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
@@ -16,11 +21,7 @@ import (
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -62,19 +63,6 @@ type (
MountFn func(VFS *vfs.VFS, mountpoint string, opt *Options) (<-chan error, func() error, error)
)
// MountPoint represents a mount with options and runtime state
type MountPoint struct {
MountPoint string
MountedOn time.Time
MountOpt Options
VFSOpt vfscommon.Options
Fs fs.Fs
VFS *vfs.VFS
MountFn MountFn
UnmountFn UnmountFn
ErrChan <-chan error
}
// Global constants
const (
MaxLeafSize = 1024 // don't pass file names longer than this
@@ -118,37 +106,424 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &Opt.NetworkMode, "network-mode", "", Opt.NetworkMode, "Mount as remote network drive, instead of fixed disk drive. Supported on Windows only")
}
// Check if folder is empty
func checkMountEmpty(mountpoint string) error {
fp, fpErr := os.Open(mountpoint)
if fpErr != nil {
return errors.Wrap(fpErr, "Can not open: "+mountpoint)
}
defer fs.CheckClose(fp, &fpErr)
_, fpErr = fp.Readdirnames(1)
// directory is not empty
if fpErr != io.EOF {
var e error
var errorMsg = "Directory is not empty: " + mountpoint + " If you want to mount it anyway use: --allow-non-empty option"
if fpErr == nil {
e = errors.New(errorMsg)
} else {
e = errors.Wrap(fpErr, errorMsg)
}
return e
}
return nil
}
// Check the root doesn't overlap the mountpoint
func checkMountpointOverlap(root, mountpoint string) error {
abs := func(x string) string {
if absX, err := filepath.EvalSymlinks(x); err == nil {
x = absX
}
if absX, err := filepath.Abs(x); err == nil {
x = absX
}
x = filepath.ToSlash(x)
if !strings.HasSuffix(x, "/") {
x += "/"
}
return x
}
rootAbs, mountpointAbs := abs(root), abs(mountpoint)
if strings.HasPrefix(rootAbs, mountpointAbs) || strings.HasPrefix(mountpointAbs, rootAbs) {
return errors.Errorf("mount point %q and directory to be mounted %q mustn't overlap", mountpoint, root)
}
return nil
}
// NewMountCommand makes a mount command with the given name and Mount function
func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Command {
var commandDefinition = &cobra.Command{
Use: commandName + " remote:path /path/to/mountpoint",
Hidden: hidden,
Short: `Mount the remote as file system on a mountpoint.`,
Long: strings.ReplaceAll(strings.ReplaceAll(mountHelp, "|", "`"), "@", commandName) + vfs.Help,
// Warning! "|" will be replaced by backticks below
// "@" will be replaced by the command name
Long: strings.ReplaceAll(strings.ReplaceAll(`
rclone @ allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with
FUSE.
First set up your remote using |rclone config|. Check it works with |rclone ls| etc.
On Linux and OSX, you can either run mount in foreground mode or background (daemon) mode.
Mount runs in foreground mode by default, use the |--daemon| flag to specify background mode.
You can only run mount in foreground mode on Windows.
On Linux/macOS/FreeBSD start the mount like this, where |/path/to/local/mount|
is an **empty** **existing** directory:
rclone @ remote:path/to/files /path/to/local/mount
On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
for details. The following examples will mount to an automatically assigned drive,
to a specific drive letter |X:|, to path |C:\path\parent\mount|
(where the parent directory or drive must exist, and the mount must **not** exist,
and which is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
the last example will mount as network share |\\cloud\remote| and map it to an
automatically assigned drive:
rclone @ remote:path/to/files *
rclone @ remote:path/to/files X:
rclone @ remote:path/to/files C:\path\parent\mount
rclone @ remote:path/to/files \\cloud\remote
When the program ends while in foreground mode, either via Ctrl+C or receiving
a SIGINT or SIGTERM signal, the mount should be automatically stopped.
When running in background mode the user will have to stop the mount manually:
# Linux
fusermount -u /path/to/local/mount
# OS X
umount /path/to/local/mount
The umount operation can fail, for example when the mountpoint is busy.
When that happens, it is the user's responsibility to stop the mount manually.
The size of the mounted file system will be set according to information retrieved
from the remote, the same as returned by the [rclone about](https://rclone.org/commands/rclone_about/)
command. Remotes with unlimited storage may report only the used size; in
that case an additional 1 PiB of free space is assumed. If the remote does not
[support](https://rclone.org/overview/#optional-features) the about feature
at all, then 1 PiB is set as both the total and the free size.
**Note**: As of |rclone| 1.52.2, |rclone mount| now requires Go version 1.13
or newer on some platforms depending on the underlying FUSE library in use.
### Installing on Windows
To run rclone @ on Windows, you will need to
download and install [WinFsp](http://www.secfs.net/winfsp/).
[WinFsp](https://github.com/billziss-gh/winfsp) is an open source
Windows File System Proxy which makes it easy to write user space file
systems for Windows. It provides a FUSE emulation layer which rclone
uses in combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
Both of these packages are by Bill Zissimopoulos who was very helpful
during the implementation of rclone @ for Windows.
#### Mounting modes on Windows
Unlike other operating systems, Microsoft Windows provides a different filesystem
type for network and fixed drives. It optimises access on the assumption that fixed
disk drives are fast and reliable, while network drives have relatively high latency
and less reliability. Some settings can also be differentiated between the two types,
for example that Windows Explorer should just display icons and not create preview
thumbnails for image and video files on network drives.
In most cases, rclone will mount the remote as a normal, fixed disk drive by default.
However, you can also choose to mount it as a remote network drive, often described
as a network share. If you mount an rclone remote using the default, fixed drive mode
and experience unexpected program errors, freezes or other issues, consider mounting
as a network drive instead.
When mounting as a fixed disk drive you can either mount to an unused drive letter,
or to a path representing a **non-existent** subdirectory of an **existing** parent
directory or drive. Using the special value |*| will tell rclone to
automatically assign the next available drive letter, starting with Z: and moving backward.
Examples:
rclone @ remote:path/to/files *
rclone @ remote:path/to/files X:
rclone @ remote:path/to/files C:\path\parent\mount
Option |--volname| can be used to set a custom volume name for the mounted
file system. The default is to use the remote name and path.
To mount as network drive, you can add option |--network-mode|
to your @ command. Mounting to a directory path is not supported in
this mode; it is a limitation Windows imposes on junctions, so the remote must always
be mounted to a drive letter.
rclone @ remote:path/to/files X: --network-mode
A volume name specified with |--volname| will be used to create the network share path.
A complete UNC path, such as |\\cloud\remote|, optionally with path
|\\cloud\remote\madeup\path|, will be used as is. Any other
string will be used as the share part, after a default prefix |\\server\|.
If no volume name is specified then |\\server\share| will be used.
You must make sure the volume name is unique when you are mounting more than one drive,
or else the mount command will fail. The share name will be treated as the volume label for
the mapped drive, shown in Windows Explorer etc, while the complete
|\\server\share| will be reported as the remote UNC path by
|net use| etc, just like a normal network drive mapping.
If you specify a full network share UNC path with |--volname|, this will implicitly
set the |--network-mode| option, so the following two examples have the same result:
rclone @ remote:path/to/files X: --network-mode
rclone @ remote:path/to/files X: --volname \\server\share
You may also specify the network share UNC path as the mountpoint itself. Then rclone
will automatically assign a drive letter, same as with |*|, and use that as the
mountpoint, and instead use the UNC path specified as the volume name, as if it were
specified with the |--volname| option. This will also implicitly set
the |--network-mode| option. This means the following two examples have the same result:
rclone @ remote:path/to/files \\cloud\remote
rclone @ remote:path/to/files * --volname \\cloud\remote
There is yet another way to enable network mode, and to set the share path,
and that is to pass the "native" libfuse/WinFsp option directly:
|--fuse-flag --VolumePrefix=\server\share|. Note that in this case the path
must have just a single backslash prefix.
*Note:* In previous versions of rclone this was the only supported method.
[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
See also [Limitations](#limitations) section below.
#### Windows filesystem permissions
The FUSE emulation layer on Windows must convert between the POSIX-based
permission model used in FUSE, and the permission model used in Windows,
based on access-control lists (ACL).
The mounted filesystem will normally get three entries in its access-control list,
representing permissions for the POSIX permission scopes: Owner, group and others.
By default, the owner and group will be taken from the current user, and the built-in
group "Everyone" will be used to represent others. The user/group can be customized
with FUSE options "UserName" and "GroupName",
e.g. |-o UserName=user123 -o GroupName="Authenticated Users"|.
The permissions on each entry will be set according to
[options](#options) |--dir-perms| and |--file-perms|,
which take a value in traditional [numeric notation](https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation),
where the default corresponds to |--file-perms 0666 --dir-perms 0777|.
Note that the mapping of permissions is not always trivial, and the result
you see in Windows Explorer may not be exactly what you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes". Windows will then show this as basic
permission "Special" instead of "Write", because "Write" includes the
"write extended attributes" permission.
If you set POSIX permissions for only allowing access to the owner, using
|--file-perms 0600 --dir-perms 0700|, the user group and the built-in "Everyone"
group will still be given some special permissions, such as "read attributes"
and "read permissions", in Windows. This is done for compatibility reasons,
e.g. to allow users without additional permissions to be able to read basic
metadata about files like in UNIX. One case that may arise is that other programs
(incorrectly) interprets this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".
WinFsp 2021 (version 1.9) introduces a new FUSE option "FileSecurity"
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you can work around issues such as the mentioned "unprotected private key file"
by specifying |-o FileSecurity="D:P(A;;FA;;;OW)"|, for file all access (FA) to the owner (OW).
#### Windows caveats
Drives created as Administrator are not visible to other accounts,
not even an account that was elevated to Administrator with the
User Account Control (UAC) feature. A result of this is that if you mount
to a drive letter from a Command Prompt run as Administrator, and then try
to access the same drive from Windows Explorer (which does not run as
Administrator), you will not be able to see the mounted drive.
If you don't need to access the drive from applications running with
administrative privileges, the easiest way around this is to always
create the mount from a non-elevated command prompt.
To make mapped drives available to the user account that created them
regardless of whether it is elevated or not, there is a special Windows setting called
[linked connections](https://docs.microsoft.com/en-us/troubleshoot/windows-client/networking/mapped-drives-not-available-from-elevated-command#detail-to-configure-the-enablelinkedconnections-registry-entry)
that can be enabled.
It is also possible to make a drive mount available to everyone on the system,
by running the process creating it as the built-in SYSTEM account.
There are several ways to do this: One is to use the command-line
utility [PsExec](https://docs.microsoft.com/en-us/sysinternals/downloads/psexec),
from Microsoft's Sysinternals suite, which has option |-s| to start
processes as the SYSTEM account. Another alternative is to run the mount
command from a Windows Scheduled Task, or a Windows Service, configured
to run as the SYSTEM account. A third alternative is to use the
[WinFsp.Launcher infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture).
Note that when running rclone as another user, it will not use
the configuration file from your profile unless you tell it to
with the [|--config|](https://rclone.org/docs/#config-config-file) option.
Read more in the [install documentation](https://rclone.org/install/).
Note that mapping to a directory path, instead of a drive letter,
does not suffer from the same limitations.
### Limitations
Without the use of |--vfs-cache-mode| this can only write files
sequentially, and can only seek when reading. This means that many
applications won't work with their files on an rclone mount without
|--vfs-cache-mode writes| or |--vfs-cache-mode full|.
See the [VFS File Caching](#vfs-file-caching) section for more info.
The bucket-based remotes (e.g. Swift, S3, Google Cloud Storage, B2,
Hubic) do not support the concept of empty directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.
Only supported on Linux, FreeBSD, OS X and Windows at the moment.
### rclone @ vs rclone sync/copy
File systems expect things to be 100% reliable, whereas cloud storage
systems are a long way from 100% reliable. The rclone sync/copy
commands cope with this with lots of retries. However, rclone @
can't use retries in the same way without making local copies of the
uploads. Look at the [VFS File Caching](#vfs-file-caching) section
for solutions to make @ more reliable.
### Attribute caching
You can use the flag |--attr-timeout| to set the time the kernel caches
the attributes (size, modification time, etc.) for directory entries.
The default is |1s| which caches files just long enough to avoid
too many callbacks to rclone from the kernel.
In theory 0s should be the correct value for filesystems which can
change outside the control of the kernel. However this causes quite a
few problems such as
[rclone using too much memory](https://github.com/rclone/rclone/issues/2157),
[rclone not serving files to samba](https://forum.rclone.org/t/rclone-1-39-vs-1-40-mount-issue/5112)
and [excessive time listing directories](https://github.com/rclone/rclone/issues/2095#issuecomment-371141147).
The kernel can cache the info about a file for the time given by
|--attr-timeout|. You may see corruption if the remote file changes
length during this window. It will show up as either a truncated file
or a file with garbage on the end. With |--attr-timeout 1s| this is
very unlikely but not impossible. The higher you set |--attr-timeout|
the more likely it is. The default setting of "1s" is the lowest
setting which mitigates the problems above.
If you set it higher (|10s| or |1m|, say) then the kernel will call
back to rclone less often, making it more efficient; however, there is
more chance of the corruption issue above.
If files don't change on the remote outside of the control of rclone
then there is no chance of corruption.
This is the same as setting the attr_timeout option in mount.fuse.
### Filters
Note that all the rclone filters can be used to select a subset of the
files to be visible in the mount.
### systemd
When running rclone @ as a systemd service, it is possible
to use Type=notify. In this case the service will enter the started state
after the mountpoint has been successfully set up.
Units having the rclone @ service specified as a requirement
will see all files and folders immediately in this mode.
### chunked reading
|--vfs-read-chunk-size| will enable reading the source objects in parts.
This can reduce the used download quota for some remotes by requesting only chunks
from the remote that are actually read at the cost of an increased number of requests.
When |--vfs-read-chunk-size-limit| is also specified and greater than
|--vfs-read-chunk-size|, the chunk size for each open file will get doubled
for each chunk read, until the specified value is reached. A value of |-1| will disable
the limit and the chunk size will grow indefinitely.
With |--vfs-read-chunk-size 100M| and |--vfs-read-chunk-size-limit 0|
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When |--vfs-read-chunk-size-limit 500M| is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
`, "|", "`"), "@", commandName) + vfs.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
opt := Opt // make a copy of the options
if Opt.Daemon {
if opt.Daemon {
config.PassConfigKeyForDaemonization = true
}
mountpoint := args[1]
fdst := cmd.NewFsDir(args)
if fdst.Name() == "" || fdst.Name() == "local" {
err := checkMountpointOverlap(fdst.Root(), mountpoint)
if err != nil {
log.Fatalf("Fatal error: %v", err)
}
}
// Show stats if the user has specifically requested them
if cmd.ShowStats() {
defer cmd.StartStats()()
}
mnt := &MountPoint{
MountFn: mount,
MountPoint: args[1],
Fs: cmd.NewFsDir(args),
MountOpt: Opt,
VFSOpt: vfsflags.Opt,
// Inform about ignored flags on Windows,
// and, if not on Windows and the --allow-non-empty flag is not used,
// verify that the mountpoint is empty.
if runtime.GOOS == "windows" {
if opt.AllowNonEmpty {
fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
}
if opt.AllowRoot {
fs.Logf(nil, "--allow-root flag does nothing on Windows")
}
if opt.AllowOther {
fs.Logf(nil, "--allow-other flag does nothing on Windows")
}
} else if !opt.AllowNonEmpty {
err := checkMountEmpty(mountpoint)
if err != nil {
log.Fatalf("Fatal error: %v", err)
}
}
daemonized, err := mnt.Mount()
if !daemonized && err == nil {
err = mnt.Wait()
// Work out the volume name, removing special
// characters from it if necessary
if opt.VolumeName == "" {
opt.VolumeName = fdst.Name() + ":" + fdst.Root()
}
opt.VolumeName = strings.Replace(opt.VolumeName, ":", " ", -1)
opt.VolumeName = strings.Replace(opt.VolumeName, "/", " ", -1)
opt.VolumeName = strings.TrimSpace(opt.VolumeName)
if runtime.GOOS == "windows" && len(opt.VolumeName) > 32 {
opt.VolumeName = opt.VolumeName[:32]
}
// Start background task if --background is specified
if opt.Daemon {
daemonized := startBackgroundMode()
if daemonized {
return
}
}
VFS := vfs.New(fdst, &vfsflags.Opt)
err := Mount(VFS, mountpoint, mount, &opt)
if err != nil {
log.Fatalf("Fatal error: %v", err)
}
@@ -166,94 +541,49 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
return commandDefinition
}
// Mount the remote at mountpoint
func (m *MountPoint) Mount() (daemonized bool, err error) {
if err = m.CheckOverlap(); err != nil {
return false, err
}
if err = m.CheckAllowings(); err != nil {
return false, err
}
m.SetVolumeName(m.MountOpt.VolumeName)
// Start background task if --daemon is specified
if m.MountOpt.Daemon {
daemonized = startBackgroundMode()
if daemonized {
return true, nil
// ClipBlocks clips the block count pointed to by b to the OS maximum
func ClipBlocks(b *uint64) {
var max uint64
switch runtime.GOOS {
case "windows":
if runtime.GOARCH == "386" {
max = (1 << 32) - 1
} else {
max = (1 << 43) - 1
}
case "darwin":
// OSX FUSE only supports 32 bit number of blocks
// https://github.com/osxfuse/osxfuse/issues/396
max = (1 << 32) - 1
default:
// no clipping
return
}
if *b > max {
*b = max
}
}
// Mount mounts the remote at mountpoint.
//
// If noModTime is set then it
func Mount(VFS *vfs.VFS, mountpoint string, mount MountFn, opt *Options) error {
if opt == nil {
opt = &DefaultOpt
}
m.VFS = vfs.New(m.Fs, &m.VFSOpt)
m.ErrChan, m.UnmountFn, err = m.MountFn(m.VFS, m.MountPoint, &m.MountOpt)
// Mount it
errChan, unmount, err := mount(VFS, mountpoint, opt)
if err != nil {
return false, errors.Wrap(err, "failed to mount FUSE fs")
return errors.Wrap(err, "failed to mount FUSE fs")
}
return false, nil
}
// CheckOverlap checks that root doesn't overlap with mountpoint
func (m *MountPoint) CheckOverlap() error {
name := m.Fs.Name()
if name != "" && name != "local" {
return nil
}
rootAbs := absPath(m.Fs.Root())
mountpointAbs := absPath(m.MountPoint)
if strings.HasPrefix(rootAbs, mountpointAbs) || strings.HasPrefix(mountpointAbs, rootAbs) {
const msg = "mount point %q and directory to be mounted %q mustn't overlap"
return errors.Errorf(msg, m.MountPoint, m.Fs.Root())
}
return nil
}
// absPath is a helper function for MountPoint.CheckOverlap
func absPath(path string) string {
if abs, err := filepath.EvalSymlinks(path); err == nil {
path = abs
}
if abs, err := filepath.Abs(path); err == nil {
path = abs
}
path = filepath.ToSlash(path)
if !strings.HasSuffix(path, "/") {
path += "/"
}
return path
}
// CheckAllowings informs about ignored flags on Windows. If not on Windows
// and the --allow-non-empty flag is not used, it verifies that the mountpoint is empty.
func (m *MountPoint) CheckAllowings() error {
opt := &m.MountOpt
if runtime.GOOS == "windows" {
if opt.AllowNonEmpty {
fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
}
if opt.AllowRoot {
fs.Logf(nil, "--allow-root flag does nothing on Windows")
}
if opt.AllowOther {
fs.Logf(nil, "--allow-other flag does nothing on Windows")
}
return nil
}
if !opt.AllowNonEmpty {
return CheckMountEmpty(m.MountPoint)
}
return nil
}
// Wait for mount end
func (m *MountPoint) Wait() error {
// Unmount on exit
var finaliseOnce sync.Once
finalise := func() {
finaliseOnce.Do(func() {
_ = sysdnotify.Stopping()
_ = m.UnmountFn()
_ = unmount()
})
}
fnHandle := atexit.Register(finalise)
@@ -266,20 +596,19 @@ func (m *MountPoint) Wait() error {
// Reload VFS cache on SIGHUP
sigHup := make(chan os.Signal, 1)
NotifyOnSigHup(sigHup)
var err error
signal.Notify(sigHup, syscall.SIGHUP)
waiting := true
for waiting {
waitloop:
for {
select {
// umount triggered outside the app
case err = <-m.ErrChan:
waiting = false
case err = <-errChan:
break waitloop
// user sent SIGHUP to clear the cache
case <-sigHup:
root, err := m.VFS.Root()
root, err := VFS.Root()
if err != nil {
fs.Errorf(m.VFS.Fs(), "Error reading root: %v", err)
fs.Errorf(VFS.Fs(), "Error reading root: %v", err)
} else {
root.ForgetAll()
}
@@ -291,29 +620,6 @@ func (m *MountPoint) Wait() error {
if err != nil {
return errors.Wrap(err, "failed to umount FUSE fs")
}
return nil
}
// Unmount the specified mountpoint
func (m *MountPoint) Unmount() (err error) {
return m.UnmountFn()
}
// SetVolumeName with sensible default
func (m *MountPoint) SetVolumeName(vol string) {
if vol == "" {
vol = m.Fs.Name() + ":" + m.Fs.Root()
}
m.MountOpt.SetVolumeName(vol)
}
// SetVolumeName removes special characters from volume name if necessary
func (opt *Options) SetVolumeName(vol string) {
vol = strings.ReplaceAll(vol, ":", " ")
vol = strings.ReplaceAll(vol, "/", " ")
vol = strings.TrimSpace(vol)
if runtime.GOOS == "windows" && len(vol) > 32 {
vol = vol[:32]
}
opt.VolumeName = vol
}

View File

@@ -11,33 +11,29 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
)
// MountInfo defines the configuration for a mount
type MountInfo struct {
unmountFn UnmountFn
MountPoint string `json:"MountPoint"`
MountedOn time.Time `json:"MountedOn"`
Fs string `json:"Fs"`
MountOpt *Options
VFSOpt *vfscommon.Options
}
var (
// mutex to protect all the variables in this block
mountMu sync.Mutex
// Mount functions available
mountFns = map[string]MountFn{}
// Map of mounted path => MountInfo
liveMounts = map[string]*MountPoint{}
// Supported mount types
supportedMountTypes = []string{"mount", "cmount", "mount2"}
liveMounts = map[string]MountInfo{}
)
// ResolveMountMethod returns mount function by name
func ResolveMountMethod(mountType string) (string, MountFn) {
if mountType != "" {
return mountType, mountFns[mountType]
}
for _, mountType := range supportedMountTypes {
if mountFns[mountType] != nil {
return mountType, mountFns[mountType]
}
}
return "", nil
}
// AddRc adds mount and unmount functionality to rc
func AddRc(mountUtilName string, mountFunction MountFn) {
mountMu.Lock()
@@ -103,12 +99,14 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
mountMu.Lock()
defer mountMu.Unlock()
if err != nil {
mountType = ""
}
mountType, mountFn := ResolveMountMethod(mountType)
if mountFn == nil {
return nil, errors.New("Mount Option specified is not registered, or is invalid")
if err != nil || mountType == "" {
if mountFns["mount"] != nil {
mountType = "mount"
} else if mountFns["cmount"] != nil {
mountType = "cmount"
} else if mountFns["mount2"] != nil {
mountType = "mount2"
}
}
// Get Fs.fs to be mounted from fs parameter in the params
@@ -117,26 +115,28 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
return nil, err
}
VFS := vfs.New(fdst, &vfsOpt)
_, unmountFn, err := mountFn(VFS, mountPoint, &mountOpt)
if err != nil {
log.Printf("mount FAILED: %v", err)
return nil, err
}
if mountFns[mountType] != nil {
VFS := vfs.New(fdst, &vfsOpt)
_, unmountFn, err := mountFns[mountType](VFS, mountPoint, &mountOpt)
// Add mount to list if mount point was successfully created
liveMounts[mountPoint] = &MountPoint{
MountPoint: mountPoint,
MountedOn: time.Now(),
MountFn: mountFn,
UnmountFn: unmountFn,
MountOpt: mountOpt,
VFSOpt: vfsOpt,
Fs: fdst,
}
if err != nil {
log.Printf("mount FAILED: %v", err)
return nil, err
}
// Add mount to list if mount point was successfully created
liveMounts[mountPoint] = MountInfo{
unmountFn: unmountFn,
MountedOn: time.Now(),
Fs: fdst.Name(),
MountPoint: mountPoint,
VFSOpt: &vfsOpt,
MountOpt: &mountOpt,
}
fs.Debugf(nil, "Mount for %s created at %s using %s", fdst.String(), mountPoint, mountType)
return nil, nil
fs.Debugf(nil, "Mount for %s created at %s using %s", fdst.String(), mountPoint, mountType)
return nil, nil
}
return nil, errors.New("Mount Option specified is not registered, or is invalid")
}
func init() {
@@ -169,14 +169,10 @@ func unMountRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
}
mountMu.Lock()
defer mountMu.Unlock()
mountInfo, found := liveMounts[mountPoint]
if !found {
return nil, errors.New("mount not found")
}
if err = mountInfo.Unmount(); err != nil {
err = performUnMount(mountPoint)
if err != nil {
return nil, err
}
delete(liveMounts, mountPoint)
return nil, nil
}
@@ -235,34 +231,16 @@ Eg
})
}
// MountInfo is a transitional structure for json marshaling
type MountInfo struct {
Fs string `json:"Fs"`
MountPoint string `json:"MountPoint"`
MountedOn time.Time `json:"MountedOn"`
}
// listMountsRc returns a list of current mounts sorted by mount path
// listMountsRc returns a list of current mounts
func listMountsRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
var mountTypes = []MountInfo{}
mountMu.Lock()
defer mountMu.Unlock()
var keys []string
for key := range liveMounts {
keys = append(keys, key)
}
sort.Strings(keys)
mountPoints := []MountInfo{}
for _, k := range keys {
m := liveMounts[k]
info := MountInfo{
Fs: m.Fs.Name(),
MountPoint: m.MountPoint,
MountedOn: m.MountedOn,
}
mountPoints = append(mountPoints, info)
for _, a := range liveMounts {
mountTypes = append(mountTypes, a)
}
return rc.Params{
"mountPoints": mountPoints,
"mountPoints": mountTypes,
}, nil
}
@@ -287,12 +265,27 @@ Eg
func unmountAll(_ context.Context, in rc.Params) (out rc.Params, err error) {
mountMu.Lock()
defer mountMu.Unlock()
for mountPoint, mountInfo := range liveMounts {
if err = mountInfo.Unmount(); err != nil {
fs.Debugf(nil, "Couldn't unmount : %s", mountPoint)
for key, mountInfo := range liveMounts {
err = performUnMount(mountInfo.MountPoint)
if err != nil {
fs.Debugf(nil, "Couldn't unmount : %s", key)
return nil, err
}
delete(liveMounts, mountPoint)
}
return nil, nil
}
// performUnMount unmounts the specified mountPoint
func performUnMount(mountPoint string) (err error) {
mountInfo, ok := liveMounts[mountPoint]
if ok {
err := mountInfo.unmountFn()
if err != nil {
return err
}
delete(liveMounts, mountPoint)
} else {
return errors.New("mount not found")
}
return nil
}

View File

@@ -13,7 +13,6 @@ import (
_ "github.com/rclone/rclone/cmd/cmount"
_ "github.com/rclone/rclone/cmd/mount"
_ "github.com/rclone/rclone/cmd/mount2"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
@@ -96,22 +95,6 @@ func TestRc(t *testing.T) {
assert.Equal(t, os.FileMode(0400), fi.Mode())
}
// check mount point list
checkMountList := func() []mountlib.MountInfo {
listCall := rc.Calls.Get("mount/listmounts")
require.NotNil(t, listCall)
listReply, err := listCall.Fn(ctx, rc.Params{})
require.NoError(t, err)
mountPointsReply, err := listReply.Get("mountPoints")
require.NoError(t, err)
mountPoints, ok := mountPointsReply.([]mountlib.MountInfo)
require.True(t, ok)
return mountPoints
}
mountPoints := checkMountList()
require.Equal(t, 1, len(mountPoints))
require.Equal(t, mountPoint, mountPoints[0].MountPoint)
// FIXME the OS sometimes appears to be using the mount
// immediately after it appears so wait a moment
time.Sleep(100 * time.Millisecond)
@@ -119,7 +102,6 @@ func TestRc(t *testing.T) {
t.Run("Unmount", func(t *testing.T) {
_, err := unmount.Fn(ctx, in)
require.NoError(t, err)
assert.Equal(t, 0, len(checkMountList()))
})
})
}

View File

@@ -1,14 +0,0 @@
// +build !plan9,!js
package mountlib
import (
"os"
"os/signal"
"syscall"
)
// NotifyOnSigHup makes SIGHUP notify given channel on supported systems
func NotifyOnSigHup(sighupChan chan os.Signal) {
signal.Notify(sighupChan, syscall.SIGHUP)
}

View File

@@ -1,10 +0,0 @@
// +build plan9 js
package mountlib
import (
"os"
)
// NotifyOnSigHup makes SIGHUP notify given channel on supported systems
func NotifyOnSigHup(sighupChan chan os.Signal) {}

View File

@@ -1,55 +0,0 @@
package mountlib
import (
"io"
"os"
"runtime"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
// CheckMountEmpty checks if folder is empty
func CheckMountEmpty(mountpoint string) error {
fp, fpErr := os.Open(mountpoint)
if fpErr != nil {
return errors.Wrap(fpErr, "Can not open: "+mountpoint)
}
defer fs.CheckClose(fp, &fpErr)
_, fpErr = fp.Readdirnames(1)
if fpErr == io.EOF {
return nil
}
msg := "Directory is not empty: " + mountpoint + " If you want to mount it anyway use: --allow-non-empty option"
if fpErr == nil {
return errors.New(msg)
}
return errors.Wrap(fpErr, msg)
}
// ClipBlocks clips the block count pointed to by b to the OS maximum
func ClipBlocks(b *uint64) {
var max uint64
switch runtime.GOOS {
case "windows":
if runtime.GOARCH == "386" {
max = (1 << 32) - 1
} else {
max = (1 << 43) - 1
}
case "darwin":
// OSX FUSE only supports 32 bit number of blocks
// https://github.com/osxfuse/osxfuse/issues/396
max = (1 << 32) - 1
default:
// no clipping
return
}
if *b > max {
*b = max
}
}

View File

@@ -337,8 +337,8 @@ func makeRandomExeName(baseName, extension string) (string, error) {
func downloadUpdate(ctx context.Context, beta bool, version, siteURL, newFile, packageFormat string) error {
osName := runtime.GOOS
arch := runtime.GOARCH
if osName == "darwin" {
osName = "osx"
if arch == "darwin" {
arch = "osx"
}
archiveFilename := fmt.Sprintf("rclone-%s-%s-%s.%s", version, osName, arch, packageFormat)

View File

@@ -1,175 +0,0 @@
package docker
import (
"encoding/json"
"net/http"
"github.com/go-chi/chi/v5"
"github.com/rclone/rclone/fs"
)
const (
contentType = "application/vnd.docker.plugins.v1.1+json"
activatePath = "/Plugin.Activate"
createPath = "/VolumeDriver.Create"
getPath = "/VolumeDriver.Get"
listPath = "/VolumeDriver.List"
removePath = "/VolumeDriver.Remove"
pathPath = "/VolumeDriver.Path"
mountPath = "/VolumeDriver.Mount"
unmountPath = "/VolumeDriver.Unmount"
capsPath = "/VolumeDriver.Capabilities"
)
// CreateRequest is the structure that docker's requests are deserialized to.
type CreateRequest struct {
Name string
Options map[string]string `json:"Opts,omitempty"`
}
// RemoveRequest structure for a volume remove request
type RemoveRequest struct {
Name string
}
// MountRequest structure for a volume mount request
type MountRequest struct {
Name string
ID string
}
// MountResponse structure for a volume mount response
type MountResponse struct {
Mountpoint string
}
// UnmountRequest structure for a volume unmount request
type UnmountRequest struct {
Name string
ID string
}
// PathRequest structure for a volume path request
type PathRequest struct {
Name string
}
// PathResponse structure for a volume path response
type PathResponse struct {
Mountpoint string
}
// GetRequest structure for a volume get request
type GetRequest struct {
Name string
}
// GetResponse structure for a volume get response
type GetResponse struct {
Volume *VolInfo
}
// ListResponse structure for a volume list response
type ListResponse struct {
Volumes []*VolInfo
}
// CapabilitiesResponse structure for a volume capability response
type CapabilitiesResponse struct {
Capabilities Capability
}
// Capability represents the list of capabilities a volume driver can return
type Capability struct {
Scope string
}
// ErrorResponse is a formatted error message that docker can understand
type ErrorResponse struct {
Err string
}
func newRouter(drv *Driver) http.Handler {
r := chi.NewRouter()
r.Post(activatePath, func(w http.ResponseWriter, r *http.Request) {
res := map[string]interface{}{
"Implements": []string{"VolumeDriver"},
}
encodeResponse(w, res, nil, activatePath)
})
r.Post(createPath, func(w http.ResponseWriter, r *http.Request) {
var req CreateRequest
if decodeRequest(w, r, &req) {
err := drv.Create(&req)
encodeResponse(w, nil, err, createPath)
}
})
r.Post(removePath, func(w http.ResponseWriter, r *http.Request) {
var req RemoveRequest
if decodeRequest(w, r, &req) {
err := drv.Remove(&req)
encodeResponse(w, nil, err, removePath)
}
})
r.Post(mountPath, func(w http.ResponseWriter, r *http.Request) {
var req MountRequest
if decodeRequest(w, r, &req) {
res, err := drv.Mount(&req)
encodeResponse(w, res, err, mountPath)
}
})
r.Post(pathPath, func(w http.ResponseWriter, r *http.Request) {
var req PathRequest
if decodeRequest(w, r, &req) {
res, err := drv.Path(&req)
encodeResponse(w, res, err, pathPath)
}
})
r.Post(getPath, func(w http.ResponseWriter, r *http.Request) {
var req GetRequest
if decodeRequest(w, r, &req) {
res, err := drv.Get(&req)
encodeResponse(w, res, err, getPath)
}
})
r.Post(unmountPath, func(w http.ResponseWriter, r *http.Request) {
var req UnmountRequest
if decodeRequest(w, r, &req) {
err := drv.Unmount(&req)
encodeResponse(w, nil, err, unmountPath)
}
})
r.Post(listPath, func(w http.ResponseWriter, r *http.Request) {
res, err := drv.List()
encodeResponse(w, res, err, listPath)
})
r.Post(capsPath, func(w http.ResponseWriter, r *http.Request) {
res := &CapabilitiesResponse{
Capabilities: Capability{Scope: pluginScope},
}
encodeResponse(w, res, nil, capsPath)
})
return r
}
func decodeRequest(w http.ResponseWriter, r *http.Request, req interface{}) bool {
if err := json.NewDecoder(r.Body).Decode(req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return false
}
return true
}
func encodeResponse(w http.ResponseWriter, res interface{}, err error, path string) {
w.Header().Set("Content-Type", contentType)
if err != nil {
fs.Debugf(path, "Request returned error: %v", err)
w.WriteHeader(http.StatusInternalServerError)
res = &ErrorResponse{Err: err.Error()}
} else if res == nil {
res = struct{}{}
}
if err = json.NewEncoder(w).Encode(res); err != nil {
fs.Debugf(path, "Response encoding failed: %v", err)
}
}

View File

@@ -1,31 +0,0 @@
ARG BASE_IMAGE=rclone/rclone:latest
ARG BUILD_PLATFORM=linux/amd64
ARG TARGET_PLATFORM=linux/amd64
# temporary build image
FROM --platform=${BUILD_PLATFORM} golang:alpine AS BUILD_ENV
COPY . /src
WORKDIR /src
RUN apk add --no-cache make git bash && \
CGO_ENABLED=0 \
GOARCH=$(echo ${TARGET_PLATFORM} | cut -d '/' -f2) \
make rclone
# plugin image
FROM ${BASE_IMAGE}
COPY --from=BUILD_ENV /src/rclone /usr/local/bin/rclone
RUN mkdir -p /data/config /data/cache /mnt \
&& /usr/local/bin/rclone version
ENV RCLONE_CONFIG=/data/config/rclone.conf
ENV RCLONE_CACHE_DIR=/data/cache
ENV RCLONE_BASE_DIR=/mnt
ENV RCLONE_VERBOSE=0
WORKDIR /data
ENTRYPOINT ["/usr/local/bin/rclone"]
CMD ["serve", "docker"]

View File

@@ -1,66 +0,0 @@
{
"description": "Rclone volume plugin for Docker",
"documentation": "https://rclone.org/",
"interface": {
"socket": "rclone.sock",
"types": ["docker.volumedriver/1.0"]
},
"linux": {
"capabilities": [
"CAP_SYS_ADMIN"
],
"devices": [
{
"path": "/dev/fuse"
}
]
},
"network": {
"type": "host"
},
"entrypoint": ["/usr/local/bin/rclone", "serve", "docker"],
"workdir": "/data",
"args": {
"name": "args",
"value": [],
"settable": ["value"]
},
"env": [
{
"name": "RCLONE_VERBOSE",
"value": "0",
"settable": ["value"]
},
{
"name": "RCLONE_CONFIG",
"value": "/data/config/rclone.conf"
},
{
"name": "RCLONE_CACHE_DIR",
"value": "/data/cache"
},
{
"name": "RCLONE_BASE_DIR",
"value": "/mnt"
}
],
"mounts": [
{
"name": "config",
"source": "/var/lib/docker-plugins/rclone/config",
"destination": "/data/config",
"type": "bind",
"options": ["rbind"],
"settable": ["source"]
},
{
"name": "cache",
"source": "/var/lib/docker-plugins/rclone/cache",
"destination": "/data/cache",
"type": "bind",
"options": ["rbind"],
"settable": ["source"]
}
],
"propagatedMount": "/mnt"
}

View File

@@ -1,19 +0,0 @@
[Unit]
Description=Docker Volume Plugin for rclone
Requires=docker.service
Before=docker.service
After=network.target
Requires=docker-volume-rclone.socket
After=docker-volume-rclone.socket
[Service]
ExecStart=/usr/bin/rclone serve docker
ExecStartPre=/bin/mkdir -p /var/lib/docker-volumes/rclone
ExecStartPre=/bin/mkdir -p /var/lib/docker-plugins/rclone/config
ExecStartPre=/bin/mkdir -p /var/lib/docker-plugins/rclone/cache
Environment=RCLONE_CONFIG=/var/lib/docker-plugins/rclone/config/rclone.conf
Environment=RCLONE_CACHE_DIR=/var/lib/docker-plugins/rclone/cache
Environment=RCLONE_VERBOSE=1
[Install]
WantedBy=multi-user.target

View File

@@ -1,8 +0,0 @@
[Unit]
Description=Docker Volume Plugin for rclone
[Socket]
ListenStream=/run/docker/plugins/rclone.sock
[Install]
WantedBy=sockets.target

View File

@@ -1,72 +0,0 @@
// Package docker serves a remote suitable for use with docker volume api
package docker
import (
"context"
"path/filepath"
"strings"
"syscall"
"github.com/spf13/cobra"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
)
var (
pluginName = "rclone"
pluginScope = "local"
baseDir = "/var/lib/docker-volumes/rclone"
sockDir = "/run/docker/plugins"
defSpecDir = "/etc/docker/plugins"
stateFile = "docker-plugin.state"
socketAddr = "" // TCP listening address or empty string for Unix socket
socketGid = syscall.Getgid()
canPersist = false // allows writing to config file
forgetState = false
noSpec = false
)
func init() {
cmdFlags := Command.Flags()
// Add command specific flags
flags.StringVarP(cmdFlags, &baseDir, "base-dir", "", baseDir, "base directory for volumes")
flags.StringVarP(cmdFlags, &socketAddr, "socket-addr", "", socketAddr, "<host:port> or absolute path (default: /run/docker/plugins/rclone.sock)")
flags.IntVarP(cmdFlags, &socketGid, "socket-gid", "", socketGid, "GID for unix socket (default: current process GID)")
flags.BoolVarP(cmdFlags, &forgetState, "forget-state", "", forgetState, "skip restoring previous state")
flags.BoolVarP(cmdFlags, &noSpec, "no-spec", "", noSpec, "do not write spec file")
// Add common mount/vfs flags
mountlib.AddFlags(cmdFlags)
vfsflags.AddFlags(cmdFlags)
}
// Command definition for cobra
var Command = &cobra.Command{
Use: "docker",
Short: `Serve any remote on docker's volume plugin API.`,
Long: strings.ReplaceAll(longHelp, "|", "`") + vfs.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args)
cmd.Run(false, false, command, func() error {
ctx := context.Background()
drv, err := NewDriver(ctx, baseDir, nil, nil, false, forgetState)
if err != nil {
return err
}
srv := NewServer(drv)
if socketAddr == "" {
// Listen on unix socket at /run/docker/plugins/<pluginName>.sock
return srv.ServeUnix(pluginName, socketGid)
}
if filepath.IsAbs(socketAddr) {
// Listen on unix socket at given path
return srv.ServeUnix(socketAddr, socketGid)
}
return srv.ServeTCP(socketAddr, "", nil, noSpec)
})
},
}

View File

@@ -1,414 +0,0 @@
package docker_test
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/cmd/serve/docker"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
_ "github.com/rclone/rclone/cmd/cmount"
_ "github.com/rclone/rclone/cmd/mount"
)
func initialise(ctx context.Context, t *testing.T) (string, fs.Fs) {
fstest.Initialise()
// Make test cache directory
testDir, err := fstest.LocalRemote()
require.NoError(t, err)
err = os.MkdirAll(testDir, 0755)
require.NoError(t, err)
// Make test file system
testFs, err := fs.NewFs(ctx, testDir)
require.NoError(t, err)
return testDir, testFs
}
func assertErrorContains(t *testing.T, err error, errString string, msgAndArgs ...interface{}) {
assert.Error(t, err)
if err != nil {
assert.Contains(t, err.Error(), errString, msgAndArgs...)
}
}
func assertVolumeInfo(t *testing.T, v *docker.VolInfo, name, path string) {
assert.Equal(t, name, v.Name)
assert.Equal(t, path, v.Mountpoint)
assert.NotEmpty(t, v.CreatedAt)
_, err := time.Parse(time.RFC3339, v.CreatedAt)
assert.NoError(t, err)
}
func TestDockerPluginLogic(t *testing.T) {
ctx := context.Background()
oldCacheDir := config.CacheDir
testDir, testFs := initialise(ctx, t)
config.CacheDir = testDir
defer func() {
config.CacheDir = oldCacheDir
if !t.Failed() {
fstest.Purge(testFs)
_ = os.RemoveAll(testDir)
}
}()
// Create dummy volume driver
drv, err := docker.NewDriver(ctx, testDir, nil, nil, true, true)
require.NoError(t, err)
require.NotNil(t, drv)
// 1st volume request
volReq := &docker.CreateRequest{
Name: "vol1",
Options: docker.VolOpts{},
}
assertErrorContains(t, drv.Create(volReq), "volume must have either remote or backend")
volReq.Options["remote"] = testDir
assert.NoError(t, drv.Create(volReq))
path1 := filepath.Join(testDir, "vol1")
assert.ErrorIs(t, drv.Create(volReq), docker.ErrVolumeExists)
getReq := &docker.GetRequest{Name: "vol1"}
getRes, err := drv.Get(getReq)
assert.NoError(t, err)
require.NotNil(t, getRes)
assertVolumeInfo(t, getRes.Volume, "vol1", path1)
// 2nd volume request
volReq.Name = "vol2"
assert.NoError(t, drv.Create(volReq))
path2 := filepath.Join(testDir, "vol2")
listRes, err := drv.List()
require.NoError(t, err)
require.Equal(t, 2, len(listRes.Volumes))
assertVolumeInfo(t, listRes.Volumes[0], "vol1", path1)
assertVolumeInfo(t, listRes.Volumes[1], "vol2", path2)
// Try prohibited volume options
volReq.Name = "vol99"
volReq.Options["remote"] = testDir
volReq.Options["type"] = "memory"
err = drv.Create(volReq)
assertErrorContains(t, err, "volume must have either remote or backend")
volReq.Options["persist"] = "WrongBoolean"
err = drv.Create(volReq)
assertErrorContains(t, err, "cannot parse option")
volReq.Options["persist"] = "true"
delete(volReq.Options, "remote")
err = drv.Create(volReq)
assertErrorContains(t, err, "persist remotes is prohibited")
volReq.Options["persist"] = "false"
volReq.Options["memory-option-broken"] = "some-value"
err = drv.Create(volReq)
assertErrorContains(t, err, "unsupported backend option")
getReq.Name = "vol99"
getRes, err = drv.Get(getReq)
assert.Error(t, err)
assert.Nil(t, getRes)
// Test mount requests
mountReq := &docker.MountRequest{
Name: "vol2",
ID: "id1",
}
mountRes, err := drv.Mount(mountReq)
assert.NoError(t, err)
require.NotNil(t, mountRes)
assert.Equal(t, path2, mountRes.Mountpoint)
mountRes, err = drv.Mount(mountReq)
assert.Error(t, err)
assert.Nil(t, mountRes)
assertErrorContains(t, err, "already mounted by this id")
mountReq.ID = "id2"
mountRes, err = drv.Mount(mountReq)
assert.NoError(t, err)
require.NotNil(t, mountRes)
assert.Equal(t, path2, mountRes.Mountpoint)
unmountReq := &docker.UnmountRequest{
Name: "vol2",
ID: "id1",
}
err = drv.Unmount(unmountReq)
assert.NoError(t, err)
err = drv.Unmount(unmountReq)
assert.Error(t, err)
assertErrorContains(t, err, "not mounted by this id")
// Simulate plugin restart
drv2, err := docker.NewDriver(ctx, testDir, nil, nil, true, false)
assert.NoError(t, err)
require.NotNil(t, drv2)
// New plugin instance should pick up the saved state
listRes, err = drv2.List()
require.NoError(t, err)
require.Equal(t, 2, len(listRes.Volumes))
assertVolumeInfo(t, listRes.Volumes[0], "vol1", path1)
assertVolumeInfo(t, listRes.Volumes[1], "vol2", path2)
rmReq := &docker.RemoveRequest{Name: "vol2"}
err = drv.Remove(rmReq)
assertErrorContains(t, err, "volume is in use")
unmountReq.ID = "id1"
err = drv.Unmount(unmountReq)
assert.Error(t, err)
assertErrorContains(t, err, "not mounted by this id")
unmountReq.ID = "id2"
err = drv.Unmount(unmountReq)
assert.NoError(t, err)
err = drv.Unmount(unmountReq)
assert.EqualError(t, err, "volume is not mounted")
err = drv.Remove(rmReq)
assert.NoError(t, err)
}
const (
httpTimeout = 2 * time.Second
tempDelay = 10 * time.Millisecond
)
type APIClient struct {
t *testing.T
cli *http.Client
host string
}
func newAPIClient(t *testing.T, host, unixPath string) *APIClient {
tr := &http.Transport{
MaxIdleConns: 1,
IdleConnTimeout: httpTimeout,
DisableCompression: true,
}
if unixPath != "" {
tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial("unix", unixPath)
}
} else {
dialer := &net.Dialer{
Timeout: httpTimeout,
KeepAlive: httpTimeout,
}
tr.DialContext = dialer.DialContext
}
cli := &http.Client{
Transport: tr,
Timeout: httpTimeout,
}
return &APIClient{
t: t,
cli: cli,
host: host,
}
}
func (a *APIClient) request(path string, in, out interface{}, wantErr bool) {
t := a.t
var (
dataIn []byte
dataOut []byte
err error
)
realm := "VolumeDriver"
if path == "Activate" {
realm = "Plugin"
}
url := fmt.Sprintf("http://%s/%s.%s", a.host, realm, path)
if str, isString := in.(string); isString {
dataIn = []byte(str)
} else {
dataIn, err = json.Marshal(in)
require.NoError(t, err)
}
fs.Logf(path, "<-- %s", dataIn)
req, err := http.NewRequest("POST", url, bytes.NewBuffer(dataIn))
require.NoError(t, err)
req.Header.Set("Content-Type", "application/json")
res, err := a.cli.Do(req)
require.NoError(t, err)
wantStatus := http.StatusOK
if wantErr {
wantStatus = http.StatusInternalServerError
}
assert.Equal(t, wantStatus, res.StatusCode)
dataOut, err = ioutil.ReadAll(res.Body)
require.NoError(t, err)
err = res.Body.Close()
require.NoError(t, err)
if strPtr, isString := out.(*string); isString || wantErr {
require.True(t, isString, "must use string for error response")
if wantErr {
var errRes docker.ErrorResponse
err = json.Unmarshal(dataOut, &errRes)
require.NoError(t, err)
*strPtr = errRes.Err
} else {
*strPtr = strings.TrimSpace(string(dataOut))
}
} else {
err = json.Unmarshal(dataOut, out)
require.NoError(t, err)
}
fs.Logf(path, "--> %s", dataOut)
time.Sleep(tempDelay)
}
func testMountAPI(t *testing.T, sockAddr string) {
if _, mountFn := mountlib.ResolveMountMethod(""); mountFn == nil {
t.Skip("Test requires working mount command")
}
ctx := context.Background()
oldCacheDir := config.CacheDir
testDir, testFs := initialise(ctx, t)
config.CacheDir = testDir
defer func() {
config.CacheDir = oldCacheDir
if !t.Failed() {
fstest.Purge(testFs)
_ = os.RemoveAll(testDir)
}
}()
// Prepare API client
var cli *APIClient
var unixPath string
if sockAddr != "" {
cli = newAPIClient(t, sockAddr, "")
} else {
unixPath = filepath.Join(testDir, "rclone.sock")
cli = newAPIClient(t, "localhost", unixPath)
}
// Create mounting volume driver and listen for requests
drv, err := docker.NewDriver(ctx, testDir, nil, nil, false, true)
require.NoError(t, err)
require.NotNil(t, drv)
defer drv.Exit()
srv := docker.NewServer(drv)
go func() {
var errServe error
if unixPath != "" {
errServe = srv.ServeUnix(unixPath, os.Getgid())
} else {
errServe = srv.ServeTCP(sockAddr, testDir, nil, false)
}
assert.ErrorIs(t, errServe, http.ErrServerClosed)
}()
defer func() {
err := srv.Shutdown(ctx)
assert.NoError(t, err)
fs.Logf(nil, "Server stopped")
time.Sleep(tempDelay)
}()
time.Sleep(tempDelay) // Let server start
// Run test sequence
path1 := filepath.Join(testDir, "path1")
require.NoError(t, os.MkdirAll(path1, 0755))
mount1 := filepath.Join(testDir, "vol1")
res := ""
cli.request("Activate", "{}", &res, false)
assert.Contains(t, res, `"VolumeDriver"`)
createReq := docker.CreateRequest{
Name: "vol1",
Options: docker.VolOpts{"remote": path1},
}
cli.request("Create", createReq, &res, false)
assert.Equal(t, "{}", res)
cli.request("Create", createReq, &res, true)
assert.Contains(t, res, "volume already exists")
mountReq := docker.MountRequest{Name: "vol1", ID: "id1"}
var mountRes docker.MountResponse
cli.request("Mount", mountReq, &mountRes, false)
assert.Equal(t, mount1, mountRes.Mountpoint)
cli.request("Mount", mountReq, &res, true)
assert.Contains(t, res, "already mounted by this id")
removeReq := docker.RemoveRequest{Name: "vol1"}
cli.request("Remove", removeReq, &res, true)
assert.Contains(t, res, "volume is in use")
text := []byte("banana")
err = ioutil.WriteFile(filepath.Join(mount1, "txt"), text, 0644)
assert.NoError(t, err)
time.Sleep(tempDelay)
text2, err := ioutil.ReadFile(filepath.Join(path1, "txt"))
assert.NoError(t, err)
assert.Equal(t, text, text2)
unmountReq := docker.UnmountRequest{Name: "vol1", ID: "id1"}
cli.request("Unmount", unmountReq, &res, false)
assert.Equal(t, "{}", res)
cli.request("Unmount", unmountReq, &res, true)
assert.Equal(t, "volume is not mounted", res)
cli.request("Remove", removeReq, &res, false)
assert.Equal(t, "{}", res)
cli.request("Remove", removeReq, &res, true)
assert.Equal(t, "volume not found", res)
var listRes docker.ListResponse
cli.request("List", "{}", &listRes, false)
assert.Empty(t, listRes.Volumes)
}
func TestDockerPluginMountTCP(t *testing.T) {
testMountAPI(t, "localhost:53789")
}
func TestDockerPluginMountUnix(t *testing.T) {
if runtime.GOOS != "linux" {
t.Skip("Test is Linux-only")
}
testMountAPI(t, "")
}


@@ -1,360 +0,0 @@
package docker
import (
"context"
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"sort"
"sync"
sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
)
// Driver implements docker driver api
type Driver struct {
root string
volumes map[string]*Volume
statePath string
dummy bool // disables real mounting
mntOpt mountlib.Options
vfsOpt vfscommon.Options
mu sync.Mutex
exitOnce sync.Once
hupChan chan os.Signal
monChan chan bool // exit if true for exit, refresh if false
}
// NewDriver makes a new docker driver
func NewDriver(ctx context.Context, root string, mntOpt *mountlib.Options, vfsOpt *vfscommon.Options, dummy, forgetState bool) (*Driver, error) {
// setup directories
cacheDir, err := filepath.Abs(config.CacheDir)
if err != nil {
return nil, errors.Wrap(err, "failed to make --cache-dir absolute")
}
err = os.MkdirAll(cacheDir, 0700)
if err != nil {
return nil, errors.Wrapf(err, "failed to create cache directory: %s", cacheDir)
}
err = os.MkdirAll(root, 0755)
if err != nil {
return nil, errors.Wrapf(err, "failed to create mount root: %s", root)
}
// setup driver state
if mntOpt == nil {
mntOpt = &mountlib.Opt
}
if vfsOpt == nil {
vfsOpt = &vfsflags.Opt
}
drv := &Driver{
root: root,
statePath: filepath.Join(cacheDir, stateFile),
volumes: map[string]*Volume{},
mntOpt: *mntOpt,
vfsOpt: *vfsOpt,
dummy: dummy,
}
drv.mntOpt.Daemon = false
// restore from saved state
if !forgetState {
if err = drv.restoreState(ctx); err != nil {
return nil, errors.Wrap(err, "failed to restore state")
}
}
// start mount monitoring
drv.hupChan = make(chan os.Signal, 1)
drv.monChan = make(chan bool, 1)
mountlib.NotifyOnSigHup(drv.hupChan)
go drv.monitor()
// unmount all volumes on exit
atexit.Register(func() {
drv.exitOnce.Do(drv.Exit)
})
// notify systemd
if err := sysdnotify.Ready(); err != nil {
return nil, errors.Wrap(err, "failed to notify systemd")
}
return drv, nil
}
// Exit will unmount all currently mounted volumes
func (drv *Driver) Exit() {
fs.Debugf(nil, "Unmount all volumes")
drv.mu.Lock()
defer drv.mu.Unlock()
reportErr(sysdnotify.Stopping())
drv.monChan <- true // ask monitor to exit
for _, vol := range drv.volumes {
reportErr(vol.unmountAll())
vol.Mounts = []string{} // never persist mounts at exit
}
reportErr(drv.saveState())
drv.dummy = true // no more mounts
}
// monitor all mounts
func (drv *Driver) monitor() {
for {
// https://stackoverflow.com/questions/19992334/how-to-listen-to-n-channels-dynamic-select-statement
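// Index 0 is monChan (true = exit, false = refresh the case list), index 1
// is hupChan (SIGHUP = clear caches); the remaining cases are the
// per-volume unmount error channels collected below.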
monChan := reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(drv.monChan),
}
hupChan := reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(drv.hupChan),
}
sources := []reflect.SelectCase{monChan, hupChan}
volumes := []*Volume{nil, nil}
drv.mu.Lock()
for _, vol := range drv.volumes {
if vol.mnt.ErrChan != nil {
errSource := reflect.SelectCase{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(vol.mnt.ErrChan),
}
sources = append(sources, errSource)
volumes = append(volumes, vol)
}
}
drv.mu.Unlock()
fs.Debugf(nil, "Monitoring %d volumes", len(sources)-2)
idx, val, _ := reflect.Select(sources)
switch idx {
case 0:
if val.Bool() {
fs.Debugf(nil, "Monitoring stopped")
return
}
case 1:
// user sent SIGHUP to clear the cache
drv.clearCache()
default:
vol := volumes[idx]
if err := val.Interface(); err != nil {
fs.Logf(nil, "Volume %q unmounted externally: %v", vol.Name, err)
} else {
fs.Infof(nil, "Volume %q unmounted externally", vol.Name)
}
drv.mu.Lock()
reportErr(vol.unmountAll())
drv.mu.Unlock()
}
}
}
// clearCache will clear cache of all volumes
func (drv *Driver) clearCache() {
fs.Debugf(nil, "Clear all caches")
drv.mu.Lock()
defer drv.mu.Unlock()
for _, vol := range drv.volumes {
reportErr(vol.clearCache())
}
}
func reportErr(err error) {
if err != nil {
fs.Errorf("docker plugin", "%v", err)
}
}
// Create volume
// To use a subpath we are limited to defining a new volume via an alias
func (drv *Driver) Create(req *CreateRequest) error {
ctx := context.Background()
drv.mu.Lock()
defer drv.mu.Unlock()
name := req.Name
fs.Debugf(nil, "Create volume %q", name)
if vol, _ := drv.getVolume(name); vol != nil {
return ErrVolumeExists
}
vol, err := newVolume(ctx, name, req.Options, drv)
if err != nil {
return err
}
drv.volumes[name] = vol
return drv.saveState()
}
// Remove volume
func (drv *Driver) Remove(req *RemoveRequest) error {
ctx := context.Background()
drv.mu.Lock()
defer drv.mu.Unlock()
vol, err := drv.getVolume(req.Name)
if err != nil {
return err
}
if err = vol.remove(ctx); err != nil {
return err
}
delete(drv.volumes, vol.Name)
return drv.saveState()
}
// List volumes handled by the driver
func (drv *Driver) List() (*ListResponse, error) {
drv.mu.Lock()
defer drv.mu.Unlock()
volumeList := drv.listVolumes()
fs.Debugf(nil, "List: %v", volumeList)
res := &ListResponse{
Volumes: []*VolInfo{},
}
for _, name := range volumeList {
vol := drv.volumes[name]
res.Volumes = append(res.Volumes, vol.getInfo())
}
return res, nil
}
// Get volume info
func (drv *Driver) Get(req *GetRequest) (*GetResponse, error) {
drv.mu.Lock()
defer drv.mu.Unlock()
vol, err := drv.getVolume(req.Name)
if err != nil {
return nil, err
}
return &GetResponse{Volume: vol.getInfo()}, nil
}
// Path returns path of the requested volume
func (drv *Driver) Path(req *PathRequest) (*PathResponse, error) {
drv.mu.Lock()
defer drv.mu.Unlock()
vol, err := drv.getVolume(req.Name)
if err != nil {
return nil, err
}
return &PathResponse{Mountpoint: vol.MountPoint}, nil
}
// Mount volume
func (drv *Driver) Mount(req *MountRequest) (*MountResponse, error) {
drv.mu.Lock()
defer drv.mu.Unlock()
vol, err := drv.getVolume(req.Name)
if err == nil {
err = vol.mount(req.ID)
}
if err == nil {
err = drv.saveState()
}
if err != nil {
return nil, err
}
return &MountResponse{Mountpoint: vol.MountPoint}, nil
}
// Unmount volume
func (drv *Driver) Unmount(req *UnmountRequest) error {
drv.mu.Lock()
defer drv.mu.Unlock()
vol, err := drv.getVolume(req.Name)
if err == nil {
err = vol.unmount(req.ID)
}
if err == nil {
err = drv.saveState()
}
return err
}
// getVolume returns volume by name
func (drv *Driver) getVolume(name string) (*Volume, error) {
vol := drv.volumes[name]
if vol == nil {
return nil, ErrVolumeNotFound
}
return vol, nil
}
// listVolumes returns a sorted list of volume names
func (drv *Driver) listVolumes() []string {
names := []string{}
for key := range drv.volumes {
names = append(names, key)
}
sort.Strings(names)
return names
}
// saveState saves volumes handled by driver to persistent store
func (drv *Driver) saveState() error {
volumeList := drv.listVolumes()
fs.Debugf(nil, "Save state %v to %s", volumeList, drv.statePath)
state := []*Volume{}
for _, key := range volumeList {
vol := drv.volumes[key]
vol.prepareState()
state = append(state, vol)
}
data, err := json.Marshal(state)
if err == nil {
err = ioutil.WriteFile(drv.statePath, data, 0600)
}
if err != nil {
return errors.Wrap(err, "failed to write state")
}
return nil
}
// restoreState recreates volumes from saved driver state
func (drv *Driver) restoreState(ctx context.Context) error {
fs.Debugf(nil, "Restore state from %s", drv.statePath)
data, err := ioutil.ReadFile(drv.statePath)
if os.IsNotExist(err) {
return nil
}
var state []*Volume
if err == nil {
err = json.Unmarshal(data, &state)
}
if err != nil {
fs.Logf(nil, "Failed to restore plugin state: %v", err)
return nil
}
for _, vol := range state {
if err := vol.restoreState(ctx, drv); err != nil {
fs.Logf(nil, "Failed to restore volume %q: %v", vol.Name, err)
continue
}
drv.volumes[vol.Name] = vol
}
return nil
}


@@ -1,43 +0,0 @@
package docker
// Note: "|" will be replaced by backticks
var longHelp = `
This command implements the Docker volume plugin API allowing docker to use
rclone as a data storage mechanism for various cloud providers.
rclone provides [docker volume plugin](/docker) based on it.
To create a docker plugin, one must create a Unix or TCP socket that Docker
will look for when you use the plugin. Rclone then listens on it for commands
from the docker daemon and runs the corresponding code when necessary.
Docker plugins can run as a managed plugin under control of the docker daemon
or as an independent native service. For testing, you can just run it directly
from the command line, for example:
|||
sudo rclone serve docker --base-dir /tmp/rclone-volumes --socket-addr localhost:8787 -vv
|||
Running |rclone serve docker| will create this socket, listening for
commands from Docker to create the necessary volumes. Normally you need not
give the |--socket-addr| flag. The API will listen on the unix domain socket
at |/run/docker/plugins/rclone.sock|. In the example above rclone will create
a TCP socket and a small file |/etc/docker/plugins/rclone.spec| containing
the socket address. We use |sudo| because both paths are writeable only by
the root user.
If you later decide to change the listening socket, the docker daemon must be
restarted to reconnect to |/run/docker/plugins/rclone.sock|
or parse new |/etc/docker/plugins/rclone.spec|. Until you restart, any
volume related docker commands will timeout trying to access the old socket.
Running directly is supported on **Linux only**, not on Windows or macOS.
This is not a problem with managed plugin mode described in details
in the [full documentation](https://rclone.org/docker).
The command will create volume mounts under the path given by |--base-dir|
(by default |/var/lib/docker-volumes/rclone| available only to root)
and maintain the JSON formatted file |docker-plugin.state| in the rclone cache
directory with book-keeping records of created and mounted volumes.
All mount and VFS options are submitted by the docker daemon via API, but
you can also provide defaults on the command line as well as set path to the
config file and cache directory or adjust logging verbosity.
`
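For illustration, here is a sketch of what the driver receives when a user
runs docker volume create against this plugin. The volume name, remote and
options below are hypothetical, and drv stands for a *docker.Driver obtained
from NewDriver:
// Sketch only: values are illustrative, not taken from the source.
req := &docker.CreateRequest{
	Name: "media_vol",
	Options: docker.VolOpts{
		"remote":         "gdrive:media", // hypothetical configured remote
		"vfs-cache-mode": "full",
	},
}
if err := drv.Create(req); err != nil {
	fs.Errorf(nil, "volume create failed: %v", err)
}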


@@ -1,307 +0,0 @@
package docker
import (
"strings"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
// applyOptions configures volume from request options.
//
// There are 5 special options:
// - "remote" aka "fs" determines existing remote from config file
// with a path or on-the-fly remote using the ":backend:" syntax.
// It is usually named "remote" in documentation but can be aliased as
// "fs" to avoid confusion with the "remote" option of some backends.
// - "type" is equivalent to the ":backend:" syntax (optional).
// - "path" provides explicit on-remote path for "type" (optional).
// - "mount-type" can be "mount", "cmount" or "mount2", defaults to
// first found (optional).
// - "persist" is reserved for future to create remotes persisted
// in rclone.conf similar to rcd (optional).
//
// Unlike rcd we use the flat naming scheme for mount, vfs and backend
// options without substructures. Dashes, underscores and mixed case
// in option names can be used interchangeably. Option name conflicts
// can be resolved in a manner similar to rclone CLI by adding prefixes:
// "vfs-", primary mount backend type like "sftp-", and so on.
//
// After triaging the options are put in MountOpt, VFSOpt or connect
// string for actual filesystem setup and in volume.Options for saving
// the state.
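//
// For example (an illustrative mapping, assuming "mysftp" is a configured
// sftp remote): the options {remote: "mysftp:backup", sftp-port: "2022",
// vfs-cache-mode: "full", allow-other: "true"} are triaged so that
// "vfs-cache-mode" lands in VFSOpt, "allow-other" in MountOpt, and
// "sftp-port" is stripped of its backend prefix to become the backend
// option "port", giving a remote string roughly like "mysftp,port=2022:backup".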
func (vol *Volume) applyOptions(volOpt VolOpts) error {
// copy options to override later
mntOpt := &vol.mnt.MountOpt
vfsOpt := &vol.mnt.VFSOpt
*mntOpt = vol.drv.mntOpt
*vfsOpt = vol.drv.vfsOpt
// vol.Options has all options except "remote" and "type"
vol.Options = VolOpts{}
vol.fsString = ""
var fsName, fsPath, fsType string
var explicitPath string
var fsOpt configmap.Simple
// parse "remote" or "type"
for key, str := range volOpt {
switch key {
case "":
continue
case "remote", "fs":
p, err := fspath.Parse(str)
if err != nil || p.Name == ":" {
return errors.Wrapf(err, "cannot parse path %q", str)
}
fsName, fsPath, fsOpt = p.Name, p.Path, p.Config
vol.Fs = str
case "type":
fsType = str
vol.Type = str
case "path":
explicitPath = str
vol.Path = str
default:
vol.Options[key] = str
}
}
// find options supported by backend
if strings.HasPrefix(fsName, ":") {
fsType = fsName[1:]
fsName = ""
}
if fsType == "" {
fsType = "local"
if fsName != "" {
var ok bool
fsType, ok = fs.ConfigMap(nil, fsName, nil).Get("type")
if !ok {
return fs.ErrorNotFoundInConfigFile
}
}
}
if explicitPath != "" {
if fsPath != "" {
fs.Logf(nil, "Explicit path will override connection string")
}
fsPath = explicitPath
}
fsInfo, err := fs.Find(fsType)
if err != nil {
return errors.Errorf("unknown filesystem type %q", fsType)
}
// handle remaining options, override fsOpt
if fsOpt == nil {
fsOpt = configmap.Simple{}
}
opt := rc.Params{}
for key, val := range vol.Options {
opt[key] = val
}
for key := range opt {
var ok bool
var err error
switch normalOptName(key) {
case "persist":
vol.persist, err = opt.GetBool(key)
ok = true
case "mount-type":
vol.mountType, err = opt.GetString(key)
ok = true
}
if err != nil {
return errors.Wrapf(err, "cannot parse option %q", key)
}
if !ok {
// try to use as a mount option in mntOpt
ok, err = getMountOption(mntOpt, opt, key)
if ok && err != nil {
return errors.Wrapf(err, "cannot parse mount option %q", key)
}
}
if !ok {
// try as a vfs option in vfsOpt
ok, err = getVFSOption(vfsOpt, opt, key)
if ok && err != nil {
return errors.Wrapf(err, "cannot parse vfs option %q", key)
}
}
if !ok {
// try as a backend option in fsOpt (backends use "_" instead of "-")
optWithPrefix := strings.ReplaceAll(normalOptName(key), "-", "_")
fsOptName := strings.TrimPrefix(optWithPrefix, fsType+"_")
hasFsPrefix := optWithPrefix != fsOptName
if !hasFsPrefix || fsInfo.Options.Get(fsOptName) == nil {
fs.Logf(nil, "Option %q is not supported by backend %q", key, fsType)
return errors.Errorf("unsupported backend option %q", key)
}
fsOpt[fsOptName], err = opt.GetString(key)
if err != nil {
return errors.Wrapf(err, "cannot parse backend option %q", key)
}
}
}
// build remote string from fsName, fsType, fsOpt, fsPath
colon := ":"
comma := ","
if fsName == "" {
fsName = ":" + fsType
}
connString := fsOpt.String()
if fsName == "" && fsType == "" {
colon = ""
connString = ""
}
if connString == "" {
comma = ""
}
vol.fsString = fsName + comma + connString + colon + fsPath
return vol.validate()
}
func getMountOption(mntOpt *mountlib.Options, opt rc.Params, key string) (ok bool, err error) {
ok = true
switch normalOptName(key) {
case "debug-fuse":
mntOpt.DebugFUSE, err = opt.GetBool(key)
case "attr-timeout":
mntOpt.AttrTimeout, err = opt.GetDuration(key)
case "option":
mntOpt.ExtraOptions, err = getStringArray(opt, key)
case "fuse-flag":
mntOpt.ExtraFlags, err = getStringArray(opt, key)
case "daemon":
mntOpt.Daemon, err = opt.GetBool(key)
case "daemon-timeout":
mntOpt.DaemonTimeout, err = opt.GetDuration(key)
case "default-permissions":
mntOpt.DefaultPermissions, err = opt.GetBool(key)
case "allow-non-empty":
mntOpt.AllowNonEmpty, err = opt.GetBool(key)
case "allow-root":
mntOpt.AllowRoot, err = opt.GetBool(key)
case "allow-other":
mntOpt.AllowOther, err = opt.GetBool(key)
case "async-read":
mntOpt.AsyncRead, err = opt.GetBool(key)
case "max-read-ahead":
err = getFVarP(&mntOpt.MaxReadAhead, opt, key)
case "write-back-cache":
mntOpt.WritebackCache, err = opt.GetBool(key)
case "volname":
mntOpt.VolumeName, err = opt.GetString(key)
case "noappledouble":
mntOpt.NoAppleDouble, err = opt.GetBool(key)
case "noapplexattr":
mntOpt.NoAppleXattr, err = opt.GetBool(key)
case "network-mode":
mntOpt.NetworkMode, err = opt.GetBool(key)
default:
ok = false
}
return
}
func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool, err error) {
var intVal int64
ok = true
switch normalOptName(key) {
// options prefixed with "vfs-"
case "vfs-cache-mode":
err = getFVarP(&vfsOpt.CacheMode, opt, key)
case "vfs-cache-poll-interval":
vfsOpt.CachePollInterval, err = opt.GetDuration(key)
case "vfs-cache-max-age":
vfsOpt.CacheMaxAge, err = opt.GetDuration(key)
case "vfs-cache-max-size":
err = getFVarP(&vfsOpt.CacheMaxSize, opt, key)
case "vfs-read-chunk-size":
err = getFVarP(&vfsOpt.ChunkSize, opt, key)
case "vfs-read-chunk-size-limit":
err = getFVarP(&vfsOpt.ChunkSizeLimit, opt, key)
case "vfs-case-insensitive":
vfsOpt.CaseInsensitive, err = opt.GetBool(key)
case "vfs-write-wait":
vfsOpt.WriteWait, err = opt.GetDuration(key)
case "vfs-read-wait":
vfsOpt.ReadWait, err = opt.GetDuration(key)
case "vfs-write-back":
vfsOpt.WriteBack, err = opt.GetDuration(key)
case "vfs-read-ahead":
err = getFVarP(&vfsOpt.ReadAhead, opt, key)
case "vfs-used-is-size":
vfsOpt.UsedIsSize, err = opt.GetBool(key)
// unprefixed vfs options
case "no-modtime":
vfsOpt.NoModTime, err = opt.GetBool(key)
case "no-checksum":
vfsOpt.NoChecksum, err = opt.GetBool(key)
case "dir-cache-time":
vfsOpt.DirCacheTime, err = opt.GetDuration(key)
case "poll-interval":
vfsOpt.PollInterval, err = opt.GetDuration(key)
case "read-only":
vfsOpt.ReadOnly, err = opt.GetBool(key)
case "dir-perms":
perms := &vfsflags.FileMode{Mode: &vfsOpt.DirPerms}
err = getFVarP(perms, opt, key)
case "file-perms":
perms := &vfsflags.FileMode{Mode: &vfsOpt.FilePerms}
err = getFVarP(perms, opt, key)
// unprefixed unix-only vfs options
case "umask":
intVal, err = opt.GetInt64(key)
vfsOpt.Umask = int(intVal)
case "uid":
intVal, err = opt.GetInt64(key)
vfsOpt.UID = uint32(intVal)
case "gid":
intVal, err = opt.GetInt64(key)
vfsOpt.GID = uint32(intVal)
// non-vfs options
default:
ok = false
}
return
}
func getFVarP(pvalue pflag.Value, opt rc.Params, key string) error {
str, err := opt.GetString(key)
if err != nil {
return err
}
return pvalue.Set(str)
}
func getStringArray(opt rc.Params, key string) ([]string, error) {
str, err := opt.GetString(key)
if err != nil {
return nil, err
}
return strings.Split(str, ","), nil
}
func normalOptName(key string) string {
return strings.ReplaceAll(strings.TrimPrefix(strings.ToLower(key), "--"), "_", "-")
}


@@ -1,100 +0,0 @@
package docker
import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"path/filepath"
"runtime"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
)
// Server connects plugin with docker daemon by protocol
type Server http.Server
// NewServer creates new docker plugin server
func NewServer(drv *Driver) *Server {
return &Server{Handler: newRouter(drv)}
}
// Shutdown the server
func (s *Server) Shutdown(ctx context.Context) error {
hs := (*http.Server)(s)
return hs.Shutdown(ctx)
}
func (s *Server) serve(listener net.Listener, addr, tempFile string) error {
if tempFile != "" {
atexit.Register(func() {
// remove spec file or self-created unix socket
fs.Debugf(nil, "Removing stale file %s", tempFile)
_ = os.Remove(tempFile)
})
}
hs := (*http.Server)(s)
return hs.Serve(listener)
}
// ServeUnix makes the handler to listen for requests in a unix socket.
// It also creates the socket file in the right directory for docker to read.
func (s *Server) ServeUnix(path string, gid int) error {
listener, socketPath, err := newUnixListener(path, gid)
if err != nil {
return err
}
if socketPath != "" {
path = socketPath
fs.Infof(nil, "Serving unix socket: %s", path)
} else {
fs.Infof(nil, "Serving systemd socket")
}
return s.serve(listener, path, socketPath)
}
// ServeTCP makes the handler listen for request on a given TCP address.
// It also writes the spec file in the right directory for docker to read.
func (s *Server) ServeTCP(addr, specDir string, tlsConfig *tls.Config, noSpec bool) error {
listener, err := net.Listen("tcp", addr)
if err != nil {
return err
}
if tlsConfig != nil {
tlsConfig.NextProtos = []string{"http/1.1"}
listener = tls.NewListener(listener, tlsConfig)
}
addr = listener.Addr().String()
specFile := ""
if !noSpec {
specFile, err = writeSpecFile(addr, "tcp", specDir)
if err != nil {
return err
}
}
fs.Infof(nil, "Serving TCP socket: %s", addr)
return s.serve(listener, addr, specFile)
}
func writeSpecFile(addr, proto, specDir string) (string, error) {
if specDir == "" && runtime.GOOS == "windows" {
specDir = os.TempDir()
}
if specDir == "" {
specDir = defSpecDir
}
if err := os.MkdirAll(specDir, 0755); err != nil {
return "", err
}
specFile := filepath.Join(specDir, "rclone.spec")
url := fmt.Sprintf("%s://%s", proto, addr)
if err := ioutil.WriteFile(specFile, []byte(url), 0644); err != nil {
return "", err
}
fs.Debugf(nil, "Plugin spec has been written to %s", specFile)
return specFile, nil
}


@@ -1,17 +0,0 @@
// +build linux,!android
package docker
import (
"os"
"github.com/coreos/go-systemd/activation"
"github.com/coreos/go-systemd/util"
)
func systemdActivationFiles() []*os.File {
if util.IsRunningSystemd() {
return activation.Files(false)
}
return nil
}


@@ -1,11 +0,0 @@
// +build !linux android
package docker
import (
"os"
)
func systemdActivationFiles() []*os.File {
return nil
}


@@ -1,56 +0,0 @@
// +build linux freebsd
package docker
import (
"fmt"
"net"
"os"
"path/filepath"
)
func newUnixListener(path string, gid int) (net.Listener, string, error) {
// try systemd socket activation
fds := systemdActivationFiles()
switch len(fds) {
case 0:
// fall thru
case 1:
listener, err := net.FileListener(fds[0])
return listener, "", err
default:
return nil, "", fmt.Errorf("expected only one socket from systemd, got %d", len(fds))
}
// create the socket ourselves
if filepath.Ext(path) == "" {
path += ".sock"
}
if !filepath.IsAbs(path) {
path = filepath.Join(sockDir, path)
}
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return nil, "", err
}
if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
return nil, "", err
}
listener, err := net.Listen("unix", path)
if err != nil {
return nil, "", err
}
if err = os.Chmod(path, 0660); err != nil {
return nil, "", err
}
if os.Geteuid() == 0 {
if err = os.Chown(path, 0, gid); err != nil {
return nil, "", err
}
}
// we don't use a spec file with unix sockets
return listener, path, nil
}


@@ -1,12 +0,0 @@
// +build !linux,!freebsd
package docker
import (
"errors"
"net"
)
func newUnixListener(path string, gid int) (net.Listener, string, error) {
return nil, "", errors.New("unix sockets require Linux or FreeBSD")
}


@@ -1,326 +0,0 @@
package docker
import (
"context"
"os"
"path/filepath"
"runtime"
"sort"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/rc"
)
// Errors
var (
ErrVolumeNotFound = errors.New("volume not found")
ErrVolumeExists = errors.New("volume already exists")
ErrMountpointExists = errors.New("non-empty mountpoint already exists")
)
// Volume keeps volume runtime state
// Public members get persisted in saved state
type Volume struct {
Name string `json:"name"`
MountPoint string `json:"mountpoint"`
CreatedAt time.Time `json:"created"`
Fs string `json:"fs"` // remote[,connectString]:path
Type string `json:"type,omitempty"` // same as ":backend:"
Path string `json:"path,omitempty"` // for "remote:path" or ":backend:path"
Options VolOpts `json:"options"` // all options together
Mounts []string `json:"mounts"` // mountReqs as a string list
mountReqs map[string]interface{}
fsString string // result of merging Fs, Type and Options
persist bool
mountType string
drv *Driver
mnt *mountlib.MountPoint
}
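// A volume serializes to the state file roughly as follows (a sketch with
// illustrative values, following the json tags above):
//   {"name":"vol1","mountpoint":"/var/lib/docker-volumes/rclone/vol1",
//    "created":"2021-05-01T12:00:00Z","fs":"gdrive:media",
//    "options":{"vfs-cache-mode":"full"},"mounts":["id1"]}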
// VolOpts keeps volume options
type VolOpts map[string]string
// VolInfo represents a volume for Get and List requests
type VolInfo struct {
Name string
Mountpoint string `json:",omitempty"`
CreatedAt string `json:",omitempty"`
Status map[string]interface{} `json:",omitempty"`
}
func newVolume(ctx context.Context, name string, volOpt VolOpts, drv *Driver) (*Volume, error) {
path := filepath.Join(drv.root, name)
mnt := &mountlib.MountPoint{
MountPoint: path,
}
vol := &Volume{
Name: name,
MountPoint: path,
CreatedAt: time.Now(),
drv: drv,
mnt: mnt,
mountReqs: make(map[string]interface{}),
}
err := vol.applyOptions(volOpt)
if err == nil {
err = vol.setup(ctx)
}
if err != nil {
return nil, err
}
return vol, nil
}
// getInfo returns short digest about volume
func (vol *Volume) getInfo() *VolInfo {
vol.prepareState()
return &VolInfo{
Name: vol.Name,
CreatedAt: vol.CreatedAt.Format(time.RFC3339),
Mountpoint: vol.MountPoint,
Status: rc.Params{"Mounts": vol.Mounts},
}
}
// prepareState prepares volume for saving state
func (vol *Volume) prepareState() {
vol.Mounts = []string{}
for id := range vol.mountReqs {
vol.Mounts = append(vol.Mounts, id)
}
sort.Strings(vol.Mounts)
}
// restoreState updates volume from saved state
func (vol *Volume) restoreState(ctx context.Context, drv *Driver) error {
vol.drv = drv
vol.mnt = &mountlib.MountPoint{
MountPoint: vol.MountPoint,
}
volOpt := vol.Options
volOpt["fs"] = vol.Fs
volOpt["type"] = vol.Type
if err := vol.applyOptions(volOpt); err != nil {
return err
}
if err := vol.validate(); err != nil {
return err
}
if err := vol.setup(ctx); err != nil {
return err
}
for _, id := range vol.Mounts {
if err := vol.mount(id); err != nil {
return err
}
}
return nil
}
// validate volume
func (vol *Volume) validate() error {
if vol.Name == "" {
return errors.New("volume name is required")
}
if (vol.Type != "" && vol.Fs != "") || (vol.Type == "" && vol.Fs == "") {
return errors.New("volume must have either remote or backend type")
}
if vol.persist && vol.Type == "" {
return errors.New("backend type is required to persist remotes")
}
if vol.persist && !canPersist {
return errors.New("using backend type to persist remotes is prohibited")
}
if vol.MountPoint == "" {
return errors.New("mount point is required")
}
if vol.mountReqs == nil {
vol.mountReqs = make(map[string]interface{})
}
return nil
}
// checkMountpoint verifies that mount point is an existing empty directory
func (vol *Volume) checkMountpoint() error {
path := vol.mnt.MountPoint
if runtime.GOOS == "windows" {
path = filepath.Dir(path)
}
_, err := os.Lstat(path)
if os.IsNotExist(err) {
if err = os.MkdirAll(path, 0700); err != nil {
return errors.Wrapf(err, "failed to create mountpoint: %s", path)
}
} else if err != nil {
return err
}
if runtime.GOOS != "windows" {
if err := mountlib.CheckMountEmpty(path); err != nil {
return ErrMountpointExists
}
}
return nil
}
// setup volume filesystem
func (vol *Volume) setup(ctx context.Context) error {
fs.Debugf(nil, "Setup volume %q as %q at path %s", vol.Name, vol.fsString, vol.MountPoint)
if err := vol.checkMountpoint(); err != nil {
return err
}
if vol.drv.dummy {
return nil
}
_, mountFn := mountlib.ResolveMountMethod(vol.mountType)
if mountFn == nil {
if vol.mountType != "" {
return errors.Errorf("unsupported mount type %q", vol.mountType)
}
return errors.New("mount command unsupported by this build")
}
vol.mnt.MountFn = mountFn
if vol.persist {
// Add remote to config file
params := rc.Params{}
for key, val := range vol.Options {
params[key] = val
}
updateMode := config.UpdateRemoteOpt{}
_, err := config.CreateRemote(ctx, vol.Name, vol.Type, params, updateMode)
if err != nil {
return err
}
}
// Use existing remote
f, err := fs.NewFs(ctx, vol.fsString)
if err == nil {
vol.mnt.Fs = f
}
return err
}
// remove volume filesystem and mounts
func (vol *Volume) remove(ctx context.Context) error {
count := len(vol.mountReqs)
fs.Debugf(nil, "Remove volume %q (count %d)", vol.Name, count)
if count > 0 {
return errors.New("volume is in use")
}
if !vol.drv.dummy {
shutdownFn := vol.mnt.Fs.Features().Shutdown
if shutdownFn != nil {
if err := shutdownFn(ctx); err != nil {
return err
}
}
}
if vol.persist {
// Remove the remote from the config file
config.DeleteRemote(vol.Name)
}
return nil
}
// clearCache will clear VFS cache for the volume
func (vol *Volume) clearCache() error {
VFS := vol.mnt.VFS
if VFS == nil {
return nil
}
root, err := VFS.Root()
if err != nil {
return errors.Wrapf(err, "error reading root: %v", VFS.Fs())
}
root.ForgetAll()
return nil
}
// mount volume filesystem
func (vol *Volume) mount(id string) error {
drv := vol.drv
count := len(vol.mountReqs)
fs.Debugf(nil, "Mount volume %q for id %q at path %s (count %d)",
vol.Name, id, vol.MountPoint, count)
if _, found := vol.mountReqs[id]; found {
return errors.New("volume is already mounted by this id")
}
if count > 0 { // already mounted
vol.mountReqs[id] = nil
return nil
}
if drv.dummy {
vol.mountReqs[id] = nil
return nil
}
if vol.mnt.Fs == nil {
return errors.New("volume filesystem is not ready")
}
if _, err := vol.mnt.Mount(); err != nil {
return err
}
vol.mnt.MountedOn = time.Now()
vol.mountReqs[id] = nil
vol.drv.monChan <- false // ask monitor to refresh channels
return nil
}
// unmount volume
func (vol *Volume) unmount(id string) error {
count := len(vol.mountReqs)
fs.Debugf(nil, "Unmount volume %q from id %q at path %s (count %d)",
vol.Name, id, vol.MountPoint, count)
if count == 0 {
return errors.New("volume is not mounted")
}
if _, found := vol.mountReqs[id]; !found {
return errors.New("volume is not mounted by this id")
}
delete(vol.mountReqs, id)
if len(vol.mountReqs) > 0 {
return nil // more mounts left
}
if vol.drv.dummy {
return nil
}
mnt := vol.mnt
if mnt.UnmountFn != nil {
if err := mnt.UnmountFn(); err != nil {
return err
}
}
mnt.ErrChan = nil
mnt.UnmountFn = nil
mnt.VFS = nil
vol.drv.monChan <- false // ask monitor to refresh channels
return nil
}
func (vol *Volume) unmountAll() error {
var firstErr error
for id := range vol.mountReqs {
err := vol.unmount(id)
if firstErr == nil {
firstErr = err
}
}
return firstErr
}


@@ -1,94 +0,0 @@
//go:generate go run assets_generate.go
// The "go:generate" directive compiles static assets by running assets_generate.go
package data
import (
"html/template"
"io/ioutil"
"time"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
)
// Help describes the options for the serve package
var Help = `--template allows a user to specify a custom markup template for http
and webdav serve functions. The server exports the following markup
to be used within the template to serve pages:
| Parameter | Description |
| :---------- | :---------- |
| .Name | The full path of a file/directory. |
| .Title | Directory listing of .Name |
| .Sort | The current sort used. This is changeable via ?sort= parameter |
| | Sort Options: namedirfirst,name,size,time (default namedirfirst) |
| .Order | The current ordering used. This is changeable via ?order= parameter |
| | Order Options: asc,desc (default asc) |
| .Query | Currently unused. |
| .Breadcrumb | Allows for creating a relative navigation |
|-- .Link | The relative to the root link of the Text. |
|-- .Text | The Name of the directory. |
| .Entries | Information about a specific file/directory. |
|-- .URL | The 'url' of an entry. |
|-- .Leaf | Currently same as 'URL' but intended to be 'just' the name. |
|-- .IsDir | Boolean for if an entry is a directory or not. |
|-- .Size | Size in Bytes of the entry. |
|-- .ModTime | The UTC timestamp of an entry. |
`
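// A minimal custom template using the parameters above could look like this
// sketch (not a file shipped with rclone):
//   <h1>{{ .Title }}</h1>
//   {{ range .Entries }}<a href="{{ .URL }}">{{ .Leaf }}</a> {{ .Size }}<br>{{ end }}
// Select it with the --template flag registered below.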
// Options for the templating functionality
type Options struct {
Template string
}
// AddFlags for the templating functionality
func AddFlags(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User Specified Template.")
}
// AfterEpoch returns the time since the epoch for the given time
func AfterEpoch(t time.Time) bool {
return t.After(time.Time{})
}
// GetTemplate returns the HTML template for serving directories via HTTP/Webdav
func GetTemplate(tmpl string) (tpl *template.Template, err error) {
var templateString string
if tmpl == "" {
templateFile, err := Assets.Open("index.html")
if err != nil {
return nil, errors.Wrap(err, "get template open")
}
defer fs.CheckClose(templateFile, &err)
templateBytes, err := ioutil.ReadAll(templateFile)
if err != nil {
return nil, errors.Wrap(err, "get template read")
}
templateString = string(templateBytes)
} else {
templateFile, err := ioutil.ReadFile(tmpl)
if err != nil {
return nil, errors.Wrap(err, "get template open")
}
templateString = string(templateFile)
}
funcMap := template.FuncMap{
"afterEpoch": AfterEpoch,
}
tpl, err = template.New("index").Funcs(funcMap).Parse(templateString)
if err != nil {
return nil, errors.Wrap(err, "get template parse")
}
return
}


@@ -1,9 +1,7 @@
package http
import (
"html/template"
"io"
"log"
"net/http"
"os"
"path"
@@ -11,35 +9,19 @@ import (
"strings"
"time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/http/data"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
"github.com/rclone/rclone/cmd/serve/httplib/serve"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
httplib "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/lib/http/auth"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
)
// Options required for http server
type Options struct {
data.Options
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{}
// Opt is options set by command line flags
var Opt = DefaultOpt
func init() {
data.AddFlags(Command.Flags(), "", &Opt.Options)
httplib.AddFlags(Command.Flags())
auth.AddFlags(Command.Flags())
httpflags.AddFlags(Command.Flags())
vfsflags.AddFlags(Command.Flags())
}
@@ -58,18 +40,17 @@ The server will log errors. Use -v to see access logs.
--bwlimit will be respected for file transfers. Use --stats to
control the stats printing.
` + httplib.Help + data.Help + auth.Help + vfs.Help,
` + httplib.Help + vfs.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
cmd.Run(false, true, command, func() error {
s := newServer(f, Opt.Template)
router, err := httplib.Router()
s := newServer(f, &httpflags.Opt)
err := s.Serve()
if err != nil {
return err
}
s.Bind(router)
httplib.Wait()
s.Wait()
return nil
})
},
@@ -77,37 +58,49 @@ control the stats printing.
// server contains everything to run the server
type server struct {
f fs.Fs
vfs *vfs.VFS
HTMLTemplate *template.Template // HTML template for web interface
*httplib.Server
f fs.Fs
vfs *vfs.VFS
}
func newServer(f fs.Fs, templatePath string) *server {
htmlTemplate, templateErr := data.GetTemplate(templatePath)
if templateErr != nil {
log.Fatal(templateErr.Error())
}
func newServer(f fs.Fs, opt *httplib.Options) *server {
mux := http.NewServeMux()
s := &server{
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
HTMLTemplate: htmlTemplate,
Server: httplib.NewServer(mux, opt),
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
}
mux.HandleFunc(s.Opt.BaseURL+"/", s.handler)
return s
}
func (s *server) Bind(router chi.Router) {
router.Use(
middleware.SetHeader("Accept-Ranges", "bytes"),
middleware.SetHeader("Server", "rclone/"+fs.Version),
)
router.Get("/*", s.handler)
router.Head("/*", s.handler)
// Serve runs the http server in the background.
//
// Use s.Close() and s.Wait() to shutdown server
func (s *server) Serve() error {
err := s.Server.Serve()
if err != nil {
return err
}
fs.Logf(s.f, "Serving on %s", s.URL())
return nil
}
// handler reads incoming requests and dispatches them
func (s *server) handler(w http.ResponseWriter, r *http.Request) {
isDir := strings.HasSuffix(r.URL.Path, "/")
remote := strings.Trim(r.URL.Path, "/")
if r.Method != "GET" && r.Method != "HEAD" {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
w.Header().Set("Accept-Ranges", "bytes")
w.Header().Set("Server", "rclone/"+fs.Version)
urlPath, ok := s.Path(w, r)
if !ok {
return
}
isDir := strings.HasSuffix(urlPath, "/")
remote := strings.Trim(urlPath, "/")
if isDir {
s.serveDir(w, r, remote)
} else {


@@ -10,10 +10,10 @@ import (
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/filter"
httplib "github.com/rclone/rclone/lib/http"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -32,13 +32,10 @@ const (
func startServer(t *testing.T, f fs.Fs) {
opt := httplib.DefaultOpt
opt.ListenAddr = testBindAddress
httpServer = newServer(f, testTemplate)
router, err := httplib.Router()
if err != nil {
t.Fatal(err.Error())
}
httpServer.Bind(router)
testURL = httplib.URL()
opt.Template = testTemplate
httpServer = newServer(f, &opt)
assert.NoError(t, httpServer.Serve())
testURL = httpServer.Server.URL()
// try to connect to the test server
pause := time.Millisecond
@@ -230,5 +227,6 @@ func TestGET(t *testing.T) {
}
func TestFinalise(t *testing.T) {
_ = httplib.Shutdown()
httpServer.Close()
httpServer.Wait()
}


@@ -1 +1 @@
Method Not Allowed
Method not allowed


@@ -1 +1 @@
Method Not Allowed
Method not allowed


@@ -1,6 +1,4 @@
// Package httplib provides common functionality for http servers
//
// Deprecated: httplib has been replaced with lib/http
package httplib
import (
@@ -19,7 +17,7 @@ import (
auth "github.com/abbot/go-http-auth"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/serve/http/data"
"github.com/rclone/rclone/cmd/serve/httplib/serve/data"
"github.com/rclone/rclone/fs"
)


@@ -0,0 +1,56 @@
//go:generate go run assets_generate.go
// The "go:generate" directive compiles static assets by running assets_generate.go
package data
import (
"html/template"
"io/ioutil"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
// AfterEpoch returns the time since the epoch for the given time
func AfterEpoch(t time.Time) bool {
return t.After(time.Time{})
}
// GetTemplate returns the HTML template for serving directories via HTTP/Webdav
func GetTemplate(tmpl string) (tpl *template.Template, err error) {
var templateString string
if tmpl == "" {
templateFile, err := Assets.Open("index.html")
if err != nil {
return nil, errors.Wrap(err, "get template open")
}
defer fs.CheckClose(templateFile, &err)
templateBytes, err := ioutil.ReadAll(templateFile)
if err != nil {
return nil, errors.Wrap(err, "get template read")
}
templateString = string(templateBytes)
} else {
templateFile, err := ioutil.ReadFile(tmpl)
if err != nil {
return nil, errors.Wrap(err, "get template open")
}
templateString = string(templateFile)
}
funcMap := template.FuncMap{
"afterEpoch": AfterEpoch,
}
tpl, err = template.New("index").Funcs(funcMap).Parse(templateString)
if err != nil {
return nil, errors.Wrap(err, "get template parse")
}
return
}


@@ -10,14 +10,13 @@ import (
"testing"
"time"
"github.com/rclone/rclone/cmd/serve/httplib/serve/data"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/rclone/rclone/cmd/serve/http/data"
)
func GetTemplate(t *testing.T) *template.Template {
htmlTemplate, err := data.GetTemplate("../../../cmd/serve/http/testdata/golden/testindex.html")
htmlTemplate, err := data.GetTemplate("../../http/testdata/golden/testindex.html")
require.NoError(t, err)
return htmlTemplate
}


@@ -15,13 +15,13 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
"github.com/rclone/rclone/cmd/serve/httplib/serve"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/lib/terminal"
"github.com/spf13/cobra"
"golang.org/x/net/http2"


@@ -5,7 +5,6 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/dlna"
"github.com/rclone/rclone/cmd/serve/docker"
"github.com/rclone/rclone/cmd/serve/ftp"
"github.com/rclone/rclone/cmd/serve/http"
"github.com/rclone/rclone/cmd/serve/restic"
@@ -31,9 +30,6 @@ func init() {
if sftp.Command != nil {
Command.AddCommand(sftp.Command)
}
if docker.Command != nil {
Command.AddCommand(docker.Command)
}
cmd.Root.AddCommand(Command)
}


@@ -7,7 +7,6 @@ import (
"fmt"
"io"
"net"
"os"
"regexp"
"strings"
@@ -15,9 +14,7 @@ import (
"github.com/pkg/sftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/terminal"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"golang.org/x/crypto/ssh"
)
@@ -228,8 +225,19 @@ func (c *conn) handleChannel(newChannel ssh.NewChannel) {
// Wait for either subsystem "sftp" or "exec" request
if <-isSFTP {
if err := serveChannel(channel, c.handlers, c.what); err != nil {
fs.Errorf(c.what, "Failed to serve SFPT: %v", err)
fs.Debugf(c.what, "Starting SFTP server")
server := sftp.NewRequestServer(channel, c.handlers)
defer func() {
err := server.Close()
if err != nil && err != io.EOF {
fs.Debugf(c.what, "Failed to close server: %v", err)
}
}()
err = server.Serve()
if err == io.EOF || err == nil {
fs.Debugf(c.what, "exited session")
} else {
fs.Errorf(c.what, "completed with error: %v", err)
}
} else {
var rc = uint32(0)
@@ -255,54 +263,3 @@ func (c *conn) handleChannels(chans <-chan ssh.NewChannel) {
go c.handleChannel(newChannel)
}
}
func serveChannel(rwc io.ReadWriteCloser, h sftp.Handlers, what string) error {
fs.Debugf(what, "Starting SFTP server")
server := sftp.NewRequestServer(rwc, h)
defer func() {
err := server.Close()
if err != nil && err != io.EOF {
fs.Debugf(what, "Failed to close server: %v", err)
}
}()
err := server.Serve()
if err != nil && err != io.EOF {
return errors.Wrap(err, "completed with error")
}
fs.Debugf(what, "exited session")
return nil
}
func serveStdio(f fs.Fs) error {
if terminal.IsTerminal(int(os.Stdout.Fd())) {
return errors.New("refusing to run SFTP server directly on a terminal. Please let sshd start rclone, by connecting with sftp or sshfs")
}
sshChannel := &stdioChannel{
stdin: os.Stdin,
stdout: os.Stdout,
}
handlers := newVFSHandler(vfs.New(f, &vfsflags.Opt))
return serveChannel(sshChannel, handlers, "stdio")
}
type stdioChannel struct {
stdin *os.File
stdout *os.File
}
func (c *stdioChannel) Read(data []byte) (int, error) {
return c.stdin.Read(data)
}
func (c *stdioChannel) Write(data []byte) (int, error) {
return c.stdout.Write(data)
}
func (c *stdioChannel) Close() error {
err1 := c.stdin.Close()
err2 := c.stdout.Close()
if err1 != nil {
return err1
}
return err2
}


@@ -27,7 +27,6 @@ type Options struct {
User string // single username
Pass string // password for user
NoAuth bool // allow no authentication on connections
Stdio bool // serve on stdio
}
// DefaultOpt is the default values used for Options
@@ -48,7 +47,6 @@ func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
flags.StringVarP(flagSet, &Opt.User, "user", "", Opt.User, "User name for authentication.")
flags.StringVarP(flagSet, &Opt.Pass, "pass", "", Opt.Pass, "Password for authentication.")
flags.BoolVarP(flagSet, &Opt.NoAuth, "no-auth", "", Opt.NoAuth, "Allow connections with no authentication if set.")
flags.BoolVarP(flagSet, &Opt.Stdio, "stdio", "", Opt.Stdio, "Run an sftp server on run stdin/stdout")
}
func init() {
@@ -92,11 +90,6 @@ reachable externally then supply "--addr :2022" for example.
Note that the default of "--vfs-cache-mode off" is fine for the rclone
sftp backend, but it may not be with other SFTP clients.
If --stdio is specified, rclone will serve SFTP over stdio, which can
be used with sshd via ~/.ssh/authorized_keys, for example:
restrict,command="rclone serve sftp --stdio ./photos" ssh-rsa ...
` + vfs.Help + proxy.Help,
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
@@ -107,9 +100,6 @@ be used with sshd via ~/.ssh/authorized_keys, for example:
cmd.CheckArgs(0, 0, command, args)
}
cmd.Run(false, true, command, func() error {
if Opt.Stdio {
return serveStdio(f)
}
s := newServer(context.Background(), f, &Opt)
err := s.Serve()
if err != nil {
