mirror of https://github.com/rclone/rclone.git synced 2026-01-20 09:23:21 +00:00

Compare commits


68 Commits

Author SHA1 Message Date
Nick Craig-Wood
6f53463c31 vfs: implement LRU-SP cache for more intelligent cache replacement - FIXME WIP
FIXME this needs docs, and maybe needs to be configurable.
FIXME needs tests also

See: https://forum.rclone.org/t/rclone-vfs-cache-maximum-size-per-file/30037
Fixes: #4110
2022-11-16 11:12:31 +00:00
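For context, LRU-SP is a size-adjusted, popularity-aware variant of LRU. A common formulation scores each entry as size * time-since-last-access / access-count and evicts the highest scores first. Below is a minimal sketch of that selection, assuming this scoring and using illustrative names rather than the real vfs cache types:

    package vfscache

    import (
        "sort"
        "time"
    )

    type cacheItem struct {
        name     string
        size     int64     // bytes on disk
        atime    time.Time // last access
        accesses int64     // times the item has been used
    }

    // score implements the assumed LRU-SP cost: large, cold,
    // rarely-used files are evicted first.
    func score(it cacheItem, now time.Time) float64 {
        n := float64(it.accesses)
        if n < 1 {
            n = 1
        }
        return float64(it.size) * now.Sub(it.atime).Seconds() / n
    }

    // victims returns items in eviction order until at least
    // need bytes would be freed.
    func victims(items []cacheItem, need int64) []cacheItem {
        now := time.Now()
        sort.Slice(items, func(i, j int) bool {
            return score(items[i], now) > score(items[j], now)
        })
        var out []cacheItem
        var freed int64
        for _, it := range items {
            if freed >= need {
                break
            }
            out = append(out, it)
            freed += it.size
        }
        return out
    }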
Nick Craig-Wood
705e8f2fe0 smb: fix Failed to sync: context canceled at the end of syncs
Before this change we were putting connections into the connection
pool with the operation's local context still attached.

This meant that when the operation finished, its context was
cancelled and the pooled connection became unusable.

See: https://forum.rclone.org/t/failed-to-sync-context-canceled/34017/
2022-11-16 10:55:25 +00:00
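A minimal sketch of the bug pattern and the shape of the fix, with illustrative types rather than the actual smb backend code:

    package smbpool

    import "context"

    type conn struct {
        // Bug pattern: a request-scoped context captured when the
        // connection was created. Once that operation finishes and
        // cancels its context, every later use of the pooled
        // connection fails with "context canceled".
        dialCtx context.Context
    }

    // Fixed shape: the pooled connection holds no context; each
    // method takes the current caller's context instead.
    func (c *conn) ping(ctx context.Context) error {
        select {
        case <-ctx.Done():
            return ctx.Err()
        default:
            return nil
        }
    }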
Nick Craig-Wood
591fc3609a vfs: fix deadlock caused by cache cleaner and upload finishing
Before this patch a deadlock could occur if the cache cleaner was
running when an object upload finished.

This fixes the problem by delaying marking the object as clean until
we have notified the VFS layer. This means that the cache cleaner
won't consider the object until **after** the VFS layer has been
notified, thus avoiding the deadlock.

See: https://forum.rclone.org/t/rclone-mount-deadlock-when-dir-cache-time-strikes/33486/
2022-11-15 18:01:36 +00:00
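A minimal sketch of the new ordering, using illustrative types rather than the real vfs cache structures:

    package vfscache

    import "sync"

    type notifier interface{ notifyUploadDone(name string) }

    type Item struct {
        mu    sync.Mutex
        name  string
        dirty bool
        vfs   notifier
    }

    // uploadFinished notifies the VFS layer first and only then
    // marks the item clean, so the cache cleaner cannot pick the
    // item up while the VFS is still being told about the upload.
    func (item *Item) uploadFinished() {
        item.vfs.notifyUploadDone(item.name)
        item.mu.Lock()
        item.dirty = false
        item.mu.Unlock()
    }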
Nick Craig-Wood
b4a3d1b9ed Add asdffdsazqqq to contributors 2022-11-15 18:01:36 +00:00
asdffdsazqqq
84219b95ab docs: faq: how to use a proxy server that requires a username and password - fixes #6565 2022-11-15 17:58:43 +00:00
Nick Craig-Wood
2c78f56d48 webdav: fix Move/Copy/DirMove when using -server-side-across-configs
Before this change, when using -server-side-across-configs rclone
would direct Move/Copy/DirMove to the destination server.

However these requests should be directed to the source server. This
is a little unclear in the RFC, but the name of the "Destination:"
header seems clear enough, and this is how dCache and Rucio have
implemented it.

See: https://forum.rclone.org/t/webdav-copy-request-implemented-incorrectly/34072/
2022-11-15 09:51:30 +00:00
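A minimal sketch of what a correctly-directed server-side MOVE looks like on the wire (hosts are placeholders): the request goes to the source server and the target is carried in the Destination: header.

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        // MOVE is sent to the SOURCE server...
        req, err := http.NewRequest("MOVE",
            "https://source.example.com/dav/old.txt", nil)
        if err != nil {
            panic(err)
        }
        // ...and the target lives in the Destination: header.
        req.Header.Set("Destination",
            "https://dest.example.com/dav/new.txt")
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }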
Nick Craig-Wood
a61d219bcd local: fix -L/--copy-links with filters missing directories
In this commit

8d1fff9a82 local: obey file filters in listing to fix errors on excluded files

We introduced the concept of local backend filters.

Unfortunately the filters were being applied before we had resolved
the symlink to point to a directory. This meant that symlinks pointing
to directories were filtered out when they shouldn't have been.

This was fixed by moving the filter check to after the symlink has
been resolved.

See: https://forum.rclone.org/t/copy-links-not-following-symlinks-on-1-60-0/34073/7
2022-11-14 18:03:40 +00:00
Nick Craig-Wood
652d3cdee4 vfs: windows: fix slow opening of exe files by not truncating files when not necessary
Before this change we truncated files in the backing store regardless
of whether we needed to or not.

After, we check to see if the file is the right size and don't
truncate if it is.

Apparently Windows Defender likes to check executables each time they
are modified, and truncating a file to its existing size is enough to
trigger the Windows Defender scan. This was causing a big slowdown for
operations which opened and closed the file a lot, such as looking at
properties on an executable.

See: https://forum.rclone.org/t/for-mount-sftp-why-right-click-on-exe-file-is-so-slow-until-it-freezes/33830
2022-11-14 17:05:51 +00:00
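A minimal sketch of the size check (illustrative, not the exact vfs code):

    package vfs

    import "os"

    // setSize truncates the backing file only when its size actually
    // differs, so a no-op open/close does not count as a modification
    // and re-trigger a Windows Defender scan.
    func setSize(f *os.File, size int64) error {
        fi, err := f.Stat()
        if err != nil {
            return err
        }
        if fi.Size() == size {
            return nil // already the right size - leave it alone
        }
        return f.Truncate(size)
    }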
Nick Craig-Wood
bb1fc5b86d Add Kamui to contributors 2022-11-14 17:05:51 +00:00
Kamui
efd3c6449b rcserver: avoid generating default credentials with htpasswd - fixes #4839 2022-11-14 15:26:44 +00:00
Nick Craig-Wood
0ac5795f8c fs: make all duration flags take y, M, w, d etc suffixes
Fixes #6556
2022-11-14 15:13:49 +00:00
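With this change, flags such as --max-age should now accept values like 1w, 2d12h or 1y in addition to the existing s, m and h suffixes.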
Nick Craig-Wood
2f77651f64 Add rkettelerij to contributors 2022-11-14 15:13:49 +00:00
Nick Craig-Wood
8daacc2b99 Add techknowlogick to contributors 2022-11-14 15:13:49 +00:00
rkettelerij
87fa9f8e46 azureblob: Add support for custom upload headers 2022-11-14 15:12:28 +00:00
albertony
1392793334 sftp: auto-detect shell type for fish
Fish is different from POSIX-based Unix shells such as bash:
bracketed variable references like the one we use in the
auto-detection echo command are not supported. The command
returns a zero exit code but produces no output on stdout.
There is a message on stderr, but we don't log it due to the
zero exit code:

fish: Variables cannot be bracketed. In fish, please use {$ShellId}.

Fixes #6552
2022-11-11 15:32:44 +00:00
techknowlogick
0e427216db s3: Add additional Wasabi locations 2022-11-11 14:39:12 +00:00
Anagh Kumar Baranwal
0c56c46523 rc: Add commands to set GC Percent & Memory Limit (1.19+)
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2022-11-10 12:07:18 +00:00
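These commands presumably map onto Go's runtime/debug knobs; SetMemoryLimit is new in Go 1.19, hence the version note. A minimal sketch of the underlying calls:

    package main

    import (
        "fmt"
        "runtime/debug"
    )

    func main() {
        oldPct := debug.SetGCPercent(50)        // run the GC more eagerly
        oldLim := debug.SetMemoryLimit(1 << 30) // soft memory limit of 1 GiB (Go 1.19+)
        fmt.Println("previous GC percent:", oldPct)
        fmt.Println("previous memory limit:", oldLim)
    }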
Nick Craig-Wood
617c5d5e1b rcat: preserve metadata when Copy falls back to Rcat
Before this change, if we copied files of unknown size, they lost
their metadata.

This was particularly noticeable using --s3-decompress.

This change adds metadata to Rcat and RcatSized and changes Copy to
pass the metadata in when it calls Rcat for an unknown sized input.

Fixes #6546
2022-11-10 12:04:35 +00:00
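As the updated chunker test in this changeset shows, operations.Rcat now takes a trailing metadata argument (nil when there is no metadata to carry).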
Nick Craig-Wood
ec2024b907 fstest: use WithMetadata / WithMimeType 2022-11-10 12:04:35 +00:00
Nick Craig-Wood
458845ce89 fs/object: add WithMetadata and WithMimetype to static and memory objects 2022-11-10 12:04:35 +00:00
Nick Craig-Wood
57bde20acd Add Aaron Gokaslan to contributors 2022-11-10 12:04:35 +00:00
Aaron Gokaslan
b0248e8070 s3: fix for unchecked err value in s3 listv2 2022-11-10 11:52:59 +00:00
Nick Craig-Wood
b285efb476 mailru: allow timestamps to be before the epoch 1970-01-01
Fixes #6547
2022-11-10 11:27:01 +00:00
Nick Craig-Wood
be6f29930b dedupe: make dedupe obey the filters
See: https://forum.rclone.org/t/dial-tcp-lookup-api-pcloud-com-no-such-host/33910/
2022-11-10 09:56:02 +00:00
Nick Craig-Wood
653bc23728 dedupe: count Checks in the stats while scanning for duplicates
This allows the user to see that rclone has not hung.

See: https://forum.rclone.org/t/dial-tcp-lookup-api-pcloud-com-no-such-host/33910/
2022-11-10 09:56:02 +00:00
Nick Craig-Wood
47b04580db accounting: make it so we can account directories as well as files 2022-11-10 09:56:02 +00:00
Nick Craig-Wood
919e28b8bf lib/cache: fix alias backend shutting down too soon
Before this patch, when an alias backend was created it would be
renamed to be canonical, and in the process Shutdown would be called
on it. This was particularly noticeable with the dropbox backend,
which gave this error when uploading files after the backend had been
shut down.

    Failed to copy: upload failed: batcher is shutting down

This patch fixes the cache Rename code so that it does not finalize
an object when the object being overwritten is the same as the
existing one.

See: https://forum.rclone.org/t/upload-failed-batcher-is-shutting-down/33900
2022-11-09 16:29:23 +00:00
Nick Craig-Wood
3a3bc5a1ae mailru: note that an app password is now needed - fixes #6398 2022-11-08 20:33:11 +00:00
Nick Craig-Wood
133c006c37 Add Roel Arents to contributors 2022-11-08 20:33:11 +00:00
Roel Arents
e455940f71 azureblob: allow emulator account/key override 2022-11-08 20:24:06 +00:00
Nick Craig-Wood
65528fd009 docs: remove link to rclone slack as it is no longer supported 2022-11-08 16:11:34 +00:00
Nick Craig-Wood
691159fe94 s3: allow Storj to server side copy since it seems to work now - fixes #6550 2022-11-08 16:05:24 +00:00
Nick Craig-Wood
09858c0c5a Add Arnie97 to contributors 2022-11-08 16:05:24 +00:00
Nick Craig-Wood
5fd0abb2b9 Add x3-apptech to contributors 2022-11-08 16:05:24 +00:00
Arnie97
36c37ffec1 backend/http: rename stat to decodeMetadata 2022-11-08 13:04:17 +00:00
Arnie97
6a5b7664f7 backend/http: support content-range response header 2022-11-08 13:04:17 +00:00
Arnie97
ebac854512 backend/http: do not update object size based on range requests 2022-11-08 13:04:17 +00:00
Arnie97
cafce96185 backend/http: parse get responses when no_head is set 2022-11-08 13:04:17 +00:00
João Henrique Franco
92ffcf9f86 wasm: fix walltime link error by adding up-to-date wasm_exec.js
Solves a link error when running rclone's wasm version: Go's `walltime1` function was renamed to `walltime`, so this commit updates wasm_exec.js with the new name.
2022-11-07 12:13:23 +00:00
albertony
64cdbb67b5 ncdu: add support for modification time 2022-11-07 11:57:44 +00:00
albertony
528fc899fb ncdu: fallback to sort by name also for sort by average size 2022-11-07 11:57:44 +00:00
x3-apptech
d452f502c3 cmd: Enable SIGINFO (Ctrl-T) handler on FreeBSD, NetBSD, OpenBSD and Dragonfly BSD 2022-11-07 11:45:04 +00:00
albertony
5d6b8141ec Replace deprecated ioutil
As of Go 1.16, the same functionality is now provided by package io or
package os, and those implementations should be preferred in new code.
2022-11-07 11:41:47 +00:00
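The replacements applied throughout this change are mechanical:

    ioutil.ReadFile  -> os.ReadFile
    ioutil.WriteFile -> os.WriteFile
    ioutil.ReadDir   -> os.ReadDir (returns []fs.DirEntry rather than []fs.FileInfo)
    ioutil.TempFile  -> os.CreateTemp
    ioutil.TempDir   -> os.MkdirTemp
    ioutil.ReadAll   -> io.ReadAll
    ioutil.NopCloser -> io.NopCloser
    ioutil.Discard   -> io.Discard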
albertony
776e5ea83a docs: fix character that was incorrectly interpreted as markdown 2022-11-07 08:59:40 +01:00
albertony
c9acc06a49 Add Clément Notin to contributors 2022-11-07 08:51:49 +01:00
Clément Notin
a2dca02594 docs: fix character that was incorrectly interpreted as markdown 2022-11-07 08:50:21 +01:00
Joda Stößer
210331bf61 docs: fix typo remove in rclone_serve_restic command 2022-11-07 08:46:05 +01:00
Nick Craig-Wood
5b5fdc6bc5 s3: add provider quirk --s3-might-gzip to fix corrupted on transfer: sizes differ
Before this change, some files were giving this error when downloaded
from Cloudflare and other providers.

    ERROR corrupted on transfer: sizes differ NNN vs MMM

This is because these providers gzip the object on the fly when
rclone isn't expecting them to. (AWS does not gzip objects unless
they were uploaded gzipped.)

This patch adds a quirk to fix the problem and a flag to control it.
The quirk `might_gzip` is set to `true` for all providers except
AWS.

See: https://forum.rclone.org/t/s3-error-corrupted-on-transfer-sizes-differ-nnn-vs-mmm/33694/
Fixes: #6533
2022-11-04 16:53:32 +00:00
Nick Craig-Wood
0de74864b6 Add dgouju to contributors 2022-11-04 16:53:32 +00:00
dgouju
7042a11875 sftp: add configuration options to set ssh Ciphers / MACs / KeyExchange 2022-11-03 17:11:28 +00:00
Nick Craig-Wood
028832ce73 s3: if bucket or object ACL is empty string then don't add X-Amz-Acl: header - fixes #5730
Before this fix it was impossible to stop rclone generating an
X-Amz-Acl: header, which is incompatible with GCS buckets using
uniform access control and is generally deprecated at AWS.
2022-11-03 17:06:24 +00:00
Philip Harvey
c7c9356af5 s3: stop setting object and bucket ACL to "private" if it is an empty string #5730 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
3292c112c5 Add Philip Harvey to contributors 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
126d71b332 Add Anthony Pessy to contributors 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
df9be72a82 Add coultonluke to contributors 2022-11-03 17:06:24 +00:00
Nick Craig-Wood
6aa8f7409a Add Samuel Johnson to contributors 2022-11-03 17:06:24 +00:00
Anthony Pessy
10c884552c s3: use different strategy to resolve s3 region
The GetBucketLocation API endpoint requires top-level permissions.

If we instead do an authenticated HEAD request to the bucket, its
location is returned in the HTTP headers.

Fixes #5066
2022-11-02 11:48:08 +00:00
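A minimal sketch of where the region comes back (the commit itself uses an authenticated HEAD via the SDK's s3manager.GetBucketRegion, as the diff below shows; the bucket name here is a placeholder):

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        req, err := http.NewRequest("HEAD",
            "https://mybucket.s3.amazonaws.com/", nil)
        if err != nil {
            panic(err)
        }
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        // The bucket's region is reported in a response header.
        fmt.Println(resp.Header.Get("x-amz-bucket-region"))
    }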
albertony
2617610741 docs: add direct download link for windows arm64 2022-10-31 21:14:10 +01:00
coultonluke
53dd174f3d docs: corrected download links in windows install docs 2022-10-31 21:09:53 +01:00
albertony
65987f5970 lib/file: improve error message for create dir on non-existent network host on windows (#6420) 2022-10-28 21:00:22 +02:00
Manoj Ghosh
1fc864fb32 oracle-object-storage: doc fix
See #6521
2022-10-28 20:32:17 +02:00
albertony
22abcc9fd2 build: update golang.org/x/net dependency
This fixes vulnerability GO-2022-0969 reported by govulncheck:

HTTP/2 server connections can hang forever waiting for a clean
shutdown that was preempted by a fatal error. This condition can
be exploited by a malicious client to cause a denial of service.

Call stacks in your code:
Error: cmd/serve/restic/restic.go:150:22: github.com/rclone/rclone/cmd/serve/restic.init$1$1 calls golang.org/x/net/http2.Server.ServeConn

Found in: golang.org/x/net/http2@v0.0.0-20220805013720-a33c5aa5df48
Fixed in: golang.org/x/net/http2@v0.0.0-20220906165146-f3363e06e74c
More info: https://pkg.go.dev/vuln/GO-2022-0969
2022-10-26 12:59:31 +02:00
albertony
178cf821de build: add vulnerability testing using govulncheck 2022-10-26 12:59:31 +02:00
albertony
f4a571786c local: clean absolute paths - fixes #6493 2022-10-25 21:09:56 +02:00
albertony
c0a8ffcbef build: setup-go v3 improved semver notation 2022-10-25 20:25:39 +02:00
albertony
76eeca9eae build: setup-go v3 dropped the stable input 2022-10-25 20:25:39 +02:00
Samuel Johnson
8114744bce docs: Update faq.md with bisync
Updated the FAQ to clarify that the experimental bisync command is now available.
2022-10-23 11:15:09 +01:00
Nick Craig-Wood
db5d582404 Start v1.61.0-DEV development 2022-10-21 16:15:53 +01:00
154 changed files with 1972 additions and 1167 deletions

View File

@@ -30,7 +30,7 @@ jobs:
include:
- job_name: linux
os: ubuntu-latest
go: '1.19.x'
go: '1.19'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -41,14 +41,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '1.19.x'
go: '1.19'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-11
go: '1.19.x'
go: '1.19'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -57,14 +57,14 @@ jobs:
- job_name: mac_arm64
os: macos-11
go: '1.19.x'
go: '1.19'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '1.19.x'
go: '1.19'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -74,20 +74,20 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.19.x'
go: '1.19'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.17
os: ubuntu-latest
go: '1.17.x'
go: '1.17'
quicktest: true
racequicktest: true
- job_name: go1.18
os: ubuntu-latest
go: '1.18.x'
go: '1.18'
quicktest: true
racequicktest: true
@@ -104,7 +104,6 @@ jobs:
- name: Install Go
uses: actions/setup-go@v3
with:
stable: 'false'
go-version: ${{ matrix.go }}
check-latest: true
@@ -234,6 +233,19 @@ jobs:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v3
with:
go-version: 1.19
check-latest: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Scan for vulnerabilities
run: govulncheck ./...
android:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
@@ -250,7 +262,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.19.x
go-version: 1.19
- name: Go module cache
uses: actions/cache@v3

View File

@@ -1 +1 @@
v1.60.0
v1.61.0

View File

@@ -12,9 +12,9 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
@@ -595,7 +595,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
)
switch {
case opt.UseEmulator:
credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
var actualEmulatorAccount = emulatorAccount
if opt.Account != "" {
actualEmulatorAccount = opt.Account
}
var actualEmulatorKey = emulatorAccountKey
if opt.Key != "" {
actualEmulatorKey = opt.Key
}
credential, err := azblob.NewSharedKeyCredential(actualEmulatorAccount, actualEmulatorKey)
if err != nil {
return nil, fmt.Errorf("failed to parse credentials: %w", err)
}
@@ -717,7 +725,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
}
// Try loading service principal credentials from file.
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
if err != nil {
return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
}
@@ -1677,6 +1685,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}
// Apply upload options (also allows one to overwrite content-type)
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
httpHeaders.CacheControl = value
case "content-disposition":
httpHeaders.ContentDisposition = value
case "content-encoding":
httpHeaders.ContentEncoding = value
case "content-language":
httpHeaders.ContentLanguage = value
case "content-type":
httpHeaders.ContentType = value
}
}
uploadParts := maxUploadParts
if uploadParts < 1 {
uploadParts = 1

View File

@@ -9,7 +9,6 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
@@ -97,7 +96,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
_, err = io.Copy(io.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
@@ -112,12 +111,12 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
body, _ := io.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
b, err := io.ReadAll(resp.Body)
if err != nil {
return result, fmt.Errorf("couldn't read IMDS response: %w", err)
}

View File

@@ -17,9 +17,9 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
@@ -183,7 +183,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
file, err := os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}

View File

@@ -11,7 +11,6 @@ import (
goflag "flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
@@ -167,7 +166,7 @@ func TestInternalVfsCache(t *testing.T) {
li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
for _, r := range li2 {
var err error
ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
if err != nil || len(ci) == 0 {
log.Printf("========== '%v' not in cache", r)
} else {
@@ -841,7 +840,7 @@ func newRun() *run {
}
if uploadDir == "" {
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
if err != nil {
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
}
@@ -984,7 +983,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
chunk := int64(1024)
cnt := size / chunk
left := size % chunk
f, err := ioutil.TempFile("", "rclonecache-tempfile")
f, err := os.CreateTemp("", "rclonecache-tempfile")
require.NoError(t, err)
for i := 0; i < int(cnt); i++ {

View File

@@ -8,7 +8,7 @@ import (
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/url"
"strings"
@@ -167,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
continue
}
var data []byte
data, err = ioutil.ReadAll(resp.Body)
data, err = io.ReadAll(resp.Body)
if err != nil {
continue
}

View File

@@ -9,7 +9,6 @@ import (
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
@@ -473,7 +472,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
var data []byte
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
data, err := ioutil.ReadFile(fp)
data, err := os.ReadFile(fp)
if err != nil {
return nil, err
}
@@ -486,7 +485,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
err := ioutil.WriteFile(filePath, data, os.ModePerm)
err := os.WriteFile(filePath, data, os.ModePerm)
if err != nil {
return err
}

View File

@@ -12,7 +12,6 @@ import (
"fmt"
gohash "hash"
"io"
"io/ioutil"
"math/rand"
"path"
"regexp"
@@ -1038,7 +1037,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
if err != nil {
return err
}
metadata, err := ioutil.ReadAll(reader)
metadata, err := io.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return err
@@ -1097,7 +1096,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
if err != nil {
return "", err
}
data, err := ioutil.ReadAll(reader)
data, err := io.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return "", err

View File

@@ -5,7 +5,7 @@ import (
"context"
"flag"
"fmt"
"io/ioutil"
"io"
"path"
"regexp"
"strings"
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
if r == nil {
return
}
data, err := ioutil.ReadAll(r)
data, err := io.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
assert.NoError(t, err)
var chunkContents []byte
assert.NotPanics(t, func() {
chunkContents, err = ioutil.ReadAll(r)
chunkContents, err = io.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
r, err = willyChunk.Open(ctx)
assert.NoError(t, err)
assert.NotPanics(t, func() {
_, err = ioutil.ReadAll(r)
_, err = io.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil {
data, err := ioutil.ReadAll(r)
data, err := io.ReadAll(r)
assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close()
@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
assert.Error(t, err)
// Rcat must fail
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime)
in := io.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
assert.Nil(t, robj)
assert.NotNil(t, err)
if err != nil {
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
r, err := dstFile.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
data, err := ioutil.ReadAll(r)
data, err := io.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()

View File

@@ -13,7 +13,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"strings"
@@ -468,7 +467,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
}
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
tempFile, err := ioutil.TempFile("", "rclone-press-")
tempFile, err := os.CreateTemp("", "rclone-press-")
defer func() {
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
// to ignore them
@@ -546,8 +545,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
}
// Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
if err != nil {
if o != nil {
removeErr := o.Remove(ctx)

View File

@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
@@ -1073,7 +1072,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
source := newRandomSource(copySize)
encrypted, err := c.newEncrypter(source, nil)
assert.NoError(t, err)
decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
decrypted, err := c.newDecrypter(io.NopCloser(encrypted))
assert.NoError(t, err)
sink := newRandomSource(copySize)
n, err := io.CopyBuffer(sink, decrypted, buf)
@@ -1144,15 +1143,15 @@ func TestEncryptData(t *testing.T) {
buf := bytes.NewBuffer(test.in)
encrypted, err := c.EncryptData(buf)
assert.NoError(t, err)
out, err := ioutil.ReadAll(encrypted)
out, err := io.ReadAll(encrypted)
assert.NoError(t, err)
assert.Equal(t, test.expected, out)
// Check we can decode the data properly too...
buf = bytes.NewBuffer(out)
decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
decrypted, err := c.DecryptData(io.NopCloser(buf))
assert.NoError(t, err)
out, err = ioutil.ReadAll(decrypted)
out, err = io.ReadAll(decrypted)
assert.NoError(t, err)
assert.Equal(t, test.in, out)
}
@@ -1187,7 +1186,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(io.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n)
}
@@ -1257,12 +1256,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16)
in := ioutil.NopCloser(io.MultiReader(in1, in2))
in := io.NopCloser(io.MultiReader(in1, in2))
fh, err := c.newDecrypter(in)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(io.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n)
}
@@ -1274,14 +1273,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
// Make random data
const dataSize = 150000
plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
plaintext, err := io.ReadAll(newRandomSource(dataSize))
assert.NoError(t, err)
// Encrypt the data
buf := bytes.NewBuffer(plaintext)
encrypted, err := c.EncryptData(buf)
assert.NoError(t, err)
ciphertext, err := ioutil.ReadAll(encrypted)
ciphertext, err := io.ReadAll(encrypted)
assert.NoError(t, err)
trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
@@ -1300,7 +1299,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
end = len(ciphertext)
}
}
reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil
}
@@ -1490,7 +1489,7 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err, what)
continue
}
_, err = ioutil.ReadAll(fh)
_, err = io.ReadAll(fh)
var expectedErr error
switch {
case i == fileHeaderSize:
@@ -1514,7 +1513,7 @@ func TestDecrypterRead(t *testing.T) {
cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd)
assert.NoError(t, err)
_, err = ioutil.ReadAll(fh)
_, err = io.ReadAll(fh)
assert.Error(t, err, "potato")
assert.Equal(t, 0, cd.closed)
@@ -1524,13 +1523,13 @@ func TestDecrypterRead(t *testing.T) {
copy(file16copy, file16)
for i := range file16copy {
file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize {
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh)
} else {
assert.NoError(t, err)
_, err = ioutil.ReadAll(fh)
_, err = io.ReadAll(fh)
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
}
file16copy[i] ^= 0xFF
@@ -1565,7 +1564,7 @@ func TestDecrypterClose(t *testing.T) {
assert.Equal(t, 0, cd.closed)
// close after reading
out, err := ioutil.ReadAll(fh)
out, err := io.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, []byte{1}, out)
assert.Equal(t, io.EOF, fh.err)

View File

@@ -14,9 +14,9 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"os"
"path"
"regexp"
"sort"
@@ -1108,7 +1108,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
// try loading service account credentials from env variable, then from a file
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
@@ -3800,7 +3800,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
data = data[:limit]
}
return ioutil.NopCloser(bytes.NewReader(data)), nil
return io.NopCloser(bytes.NewReader(data)), nil
}
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,

View File

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"os"
"path"
@@ -78,7 +77,7 @@ var additionalMimeTypes = map[string]string{
// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {})
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
buf, err := os.ReadFile(filepath.FromSlash("test/about.json"))
var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"`

View File

@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -1186,7 +1185,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.New("can't download - no id")
}
if o.contentType == emptyMimeType {
return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
return io.NopCloser(bytes.NewReader([]byte{})), nil
}
fs.FixRangeOption(options, o.size)
resp, err := o.fs.rpc(ctx, "getFile", params{

View File

@@ -19,8 +19,8 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"strconv"
"strings"
@@ -487,7 +487,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}

View File

@@ -3,7 +3,7 @@ package googlephotos
import (
"context"
"fmt"
"io/ioutil"
"io"
"net/http"
"path"
"testing"
@@ -99,7 +99,7 @@ func TestIntegration(t *testing.T) {
t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx)
require.NoError(t, err)
buf, err := ioutil.ReadAll(in)
buf, err := io.ReadAll(in)
require.NoError(t, err)
require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000)

View File

@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"path"
"time"
@@ -118,7 +117,7 @@ func (o *Object) updateHashes(ctx context.Context) error {
defer func() {
_ = r.Close()
}()
if _, err = io.Copy(ioutil.Discard, r); err != nil {
if _, err = io.Copy(io.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err)
return err
}

View File

@@ -13,7 +13,6 @@ import (
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"
@@ -305,7 +304,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f,
remote: remote,
}
err := o.stat(ctx)
err := o.head(ctx)
if err != nil {
return nil, err
}
@@ -317,15 +316,6 @@ func (f *Fs) url(remote string) string {
return f.endpointURL + rest.URLPathEscape(remote)
}
// parse s into an int64, on failure return def
func parseInt64(s string, def int64) int64 {
n, e := strconv.ParseInt(s, 10, 64)
if e != nil {
return def
}
return n
}
// Errors returned by parseName
var (
errURLJoinFailed = errors.New("URLJoin failed")
@@ -500,7 +490,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs: f,
remote: remote,
}
switch err := file.stat(ctx); err {
switch err := file.head(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
@@ -579,8 +569,8 @@ func (o *Object) url() string {
return o.fs.url(o.remote)
}
// stat updates the info field in the Object
func (o *Object) stat(ctx context.Context) error {
// head sends a HEAD request to update info fields in the Object
func (o *Object) head(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
@@ -601,13 +591,19 @@ func (o *Object) stat(ctx context.Context) error {
if err != nil {
return fmt.Errorf("failed to stat: %w", err)
}
return o.decodeMetadata(ctx, res)
}
// decodeMetadata updates info fields in the Object according to HTTP response headers
func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
t = timeUnset
}
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
o.size = rest.ParseSizeFromHeaders(res.Header)
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
@@ -653,6 +649,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
if err = o.decodeMetadata(ctx, res); err != nil {
return nil, fmt.Errorf("decodeMetadata failed: %w", err)
}
return res.Body, nil
}

View File

@@ -3,7 +3,7 @@ package http
import (
"context"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"net/url"
@@ -41,12 +41,12 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting)
fileList, err := ioutil.ReadDir(filesPath)
fileList, err := os.ReadDir(filesPath)
require.NoError(t, err)
require.Greater(t, len(fileList), 0)
for _, file := range fileList {
if !file.IsDir() {
data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
data, _ := os.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2
}
@@ -194,31 +194,66 @@ func TestNewObject(t *testing.T) {
}
func TestOpen(t *testing.T) {
f, tidy := prepare(t)
m, tidy := prepareServer(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
for _, head := range []bool{false, true} {
if !head {
m.Set("no_head", "true")
}
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
// Test normal read
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
for _, rangeRead := range []bool{false, true} {
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
if !head {
// Test mod time is still indeterminate
tObj := o.ModTime(context.Background())
assert.Equal(t, time.Duration(0), time.Unix(0, 0).Sub(tObj))
// Test file size is still indeterminate
assert.Equal(t, int64(-1), o.Size())
}
var data []byte
if !rangeRead {
// Test normal read
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
} else {
// Test with range request
fd, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
// Test the time is always correct on the object after file open
tObj := o.ModTime(context.Background())
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
if !rangeRead {
// Test the file size
assert.Equal(t, int64(len(data)), o.Size())
}
}
}
// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
func TestMimeType(t *testing.T) {

View File

@@ -12,7 +12,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
@@ -822,7 +821,7 @@ func (f *Fs) allocatePathRaw(file string, absolute bool) string {
func grantTypeFilter(req *http.Request) {
if legacyTokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
refreshBody, err := io.ReadAll(req.Body)
if err != nil {
return
}
@@ -832,7 +831,7 @@ func grantTypeFilter(req *http.Request) {
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close())
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
req.Body = io.NopCloser(bytes.NewReader(refreshBody))
}
}
@@ -1789,7 +1788,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
var tempFile *os.File
// create the cache file
tempFile, err = ioutil.TempFile("", cachePrefix)
tempFile, err = os.CreateTemp("", cachePrefix)
if err != nil {
return
}
@@ -1817,7 +1816,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
} else {
// that's a small file, just read it into memory
var inData []byte
inData, err = ioutil.ReadAll(teeReader)
inData, err = io.ReadAll(teeReader)
if err != nil {
return
}
@@ -1914,7 +1913,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// copy the already uploaded bytes into the trash :)
var result api.UploadResponse
_, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
_, err = io.CopyN(io.Discard, in, response.ResumePos)
if err != nil {
return err
}

View File

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -124,8 +123,8 @@ routine so this flag shouldn't normally be used.`,
Help: `Don't check to see if the files change during upload.
Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.
are being uploaded and aborts with a message which starts "can't copy -
source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
@@ -521,11 +520,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name := fi.Name()
mode := fi.Mode()
newRemote := f.cleanRemote(dir, name)
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
@@ -542,6 +536,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
mode = fi.Mode()
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -646,7 +645,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
precision = time.Second
// Create temporary file and test it
fd, err := ioutil.TempFile("", "rclone")
fd, err := os.CreateTemp("", "rclone")
if err != nil {
// If failed return 1s
// fmt.Println("Failed to create temp file", err)
@@ -1073,7 +1072,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
if err != nil {
return nil, err
}
return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
return readers.NewLimitedReadCloser(io.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
}
// Open an object for read
@@ -1400,30 +1399,27 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
}
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
} else {
s = filepath.Clean(s)
}
}
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !noUNC {
// Convert to UNC
s = file.UNCPath(s)
}
return s
}
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = enc.FromStandardPath(s)
return s
}

View File

@@ -4,7 +4,7 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"io"
"os"
"path"
"path/filepath"
@@ -150,7 +150,7 @@ func TestSymlink(t *testing.T) {
// Check reading the object
in, err := o.Open(ctx)
require.NoError(t, err)
contents, err := ioutil.ReadAll(in)
contents, err := io.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt", string(contents))
require.NoError(t, in.Close())
@@ -158,7 +158,7 @@ func TestSymlink(t *testing.T) {
// Check reading the object with range
in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
require.NoError(t, err)
contents, err = ioutil.ReadAll(in)
contents, err = io.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt"[2:5+1], string(contents))
require.NoError(t, in.Close())

View File

@@ -1,7 +1,6 @@
package local
import (
"io/ioutil"
"os"
"sync"
"testing"
@@ -13,7 +12,7 @@ import (
// Check we can remove an open file
func TestRemove(t *testing.T) {
fd, err := ioutil.TempFile("", "rclone-remove-test")
fd, err := os.CreateTemp("", "rclone-remove-test")
require.NoError(t, err)
name := fd.Name()
defer func() {

View File

@@ -69,6 +69,11 @@ func (w *BinWriter) WritePu64(val int64) {
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
// WriteP64 writes an signed long as unsigned varint
func (w *BinWriter) WriteP64(val int64) {
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
// WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) {
buf := []byte(str)

View File

@@ -18,7 +18,6 @@ import (
"encoding/hex"
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
@@ -91,8 +90,13 @@ func init() {
Help: "User name (usually email).",
Required: true,
}, {
Name: "pass",
Help: "Password.",
Name: "pass",
Help: `Password.
This must be an app password - rclone will not work with your normal
password. See the Configuration section in the docs for how to make an
app password.
`,
Required: true,
IsPassword: true,
}, {
@@ -641,12 +645,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
return nil, -1, err
}
mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
modTime := time.Unix(mTime, 0)
modTime := time.Unix(int64(item.Mtime), 0)
isDir, err := f.isDir(item.Kind, remote)
if err != nil {
@@ -1660,7 +1659,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Attempt to put by calculating hash in memory
if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
fileBuf, err = ioutil.ReadAll(in)
fileBuf, err = io.ReadAll(in)
if err != nil {
return err
}
@@ -1703,7 +1702,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if size <= mrhash.Size {
// Optimize upload: skip extra request if data fits in the hash buffer.
if fileBuf == nil {
fileBuf, err = ioutil.ReadAll(wrapIn)
fileBuf, err = io.ReadAll(wrapIn)
}
if fileHash == nil && err == nil {
fileHash = mrhash.Sum(fileBuf)
@@ -2058,7 +2057,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
req.WritePu16(0) // revision
req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
req.WritePu64(o.size)
req.WritePu64(o.modTime.Unix())
req.WriteP64(o.modTime.Unix())
req.WritePu32(0)
req.Write(o.mrHash)
@@ -2214,7 +2213,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
_, err = io.CopyN(io.Discard, wrapStream, start)
if err != nil {
closeBody(res)
return nil, err

View File

@@ -8,7 +8,6 @@ import (
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"path"
"strings"
"sync"
@@ -575,7 +574,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
data = data[:limit]
}
return ioutil.NopCloser(bytes.NewBuffer(data)), nil
return io.NopCloser(bytes.NewBuffer(data)), nil
}
// Update the object with the contents of the io.Reader, modTime and size
@@ -583,7 +582,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split()
data, err := ioutil.ReadAll(in)
data, err := io.ReadAll(in)
if err != nil {
return fmt.Errorf("failed to update memory object: %w", err)
}

View File

@@ -12,7 +12,6 @@ import (
"fmt"
gohash "hash"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
@@ -972,7 +971,7 @@ func (o *Object) netStorageUploadRequest(ctx context.Context, in io.Reader, src
URL = o.fs.url(src.Remote())
}
if strings.HasSuffix(URL, ".rclonelink") {
bits, err := ioutil.ReadAll(in)
bits, err := io.ReadAll(in)
if err != nil {
return err
}
@@ -1058,7 +1057,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope
if strings.HasSuffix(URL, ".rclonelink") && o.target != "" {
fs.Infof(nil, "Converting a symlink to the rclonelink file on download %q", URL)
reader := strings.NewReader(o.target)
readcloser := ioutil.NopCloser(reader)
readcloser := io.NopCloser(reader)
return readcloser, nil
}

View File

@@ -686,6 +686,7 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}

View File

@@ -15,7 +15,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -26,6 +25,8 @@ import (
"sync"
"time"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
@@ -1126,15 +1127,39 @@ func init() {
Provider: "LyveCloud",
}, {
Value: "s3.wasabisys.com",
Help: "Wasabi US East endpoint",
Help: "Wasabi US East 1 (N. Virginia)",
Provider: "Wasabi",
}, {
Value: "s3.us-east-2.wasabisys.com",
Help: "Wasabi US East 2 (N. Virginia)",
Provider: "Wasabi",
}, {
Value: "s3.us-central-1.wasabisys.com",
Help: "Wasabi US Central 1 (Texas)",
Provider: "Wasabi",
}, {
Value: "s3.us-west-1.wasabisys.com",
Help: "Wasabi US West endpoint",
Help: "Wasabi US West 1 (Oregon)",
Provider: "Wasabi",
}, {
Value: "s3.ca-central-1.wasabisys.com",
Help: "Wasabi CA Central 1 (Toronto)",
Provider: "Wasabi",
}, {
Value: "s3.eu-central-1.wasabisys.com",
Help: "Wasabi EU Central endpoint",
Help: "Wasabi EU Central 1 (Amsterdam)",
Provider: "Wasabi",
}, {
Value: "s3.eu-central-2.wasabisys.com",
Help: "Wasabi EU Central 2 (Frankfurt)",
Provider: "Wasabi",
}, {
Value: "s3.eu-west-1.wasabisys.com",
Help: "Wasabi EU West 1 (London)",
Provider: "Wasabi",
}, {
Value: "s3.eu-west-2.wasabisys.com",
Help: "Wasabi EU West 2 (Paris)",
Provider: "Wasabi",
}, {
Value: "s3.ap-northeast-1.wasabisys.com",
@@ -1144,6 +1169,14 @@ func init() {
Value: "s3.ap-northeast-2.wasabisys.com",
Help: "Wasabi AP Northeast 2 (Osaka) endpoint",
Provider: "Wasabi",
}, {
Value: "s3.ap-southeast-1.wasabisys.com",
Help: "Wasabi AP Southeast 1 (Singapore)",
Provider: "Wasabi",
}, {
Value: "s3.ap-southeast-2.wasabisys.com",
Help: "Wasabi AP Southeast 2 (Sydney)",
Provider: "Wasabi",
}, {
Value: "s3.ir-thr-at1.arvanstorage.com",
Help: "ArvanCloud Tehran Iran (Asiatech) endpoint",
@@ -1537,7 +1570,11 @@ This ACL is used for creating objects and if bucket_acl isn't set, for creating
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
`,
Provider: "!Storj,Cloudflare",
Examples: []fs.OptionExample{{
Value: "default",
@@ -1591,7 +1628,11 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when only when creating buckets. If it
isn't set then "acl" is used instead.`,
isn't set then "acl" is used instead.
If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl:
header is added and the default (private) will be used.
`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "private",
@@ -2164,6 +2205,31 @@ can't check the size and hash but the file contents will be decompressed.
`,
Advanced: true,
Default: false,
}, {
Name: "might_gzip",
Help: strings.ReplaceAll(`Set this if the backend might gzip objects.
Normally providers will not alter objects when they are downloaded. If
an object was not uploaded with |Content-Encoding: gzip| then it won't
be set on download.
However some providers may gzip objects even if they weren't uploaded
with |Content-Encoding: gzip| (eg Cloudflare).
A symptom of this would be receiving errors like
ERROR corrupted on transfer: sizes differ NNN vs MMM
If you set this flag and rclone downloads an object with
Content-Encoding: gzip set and chunked transfer encoding, then rclone
will decompress the object on the fly.
If this is set to unset (the default) then rclone will choose
according to the provider setting what to apply, but you can override
rclone's choice here.
`, "|", "`"),
Default: fs.Tristate{},
Advanced: true,
}, {
Name: "no_system_metadata",
Help: `Suppress setting and reading of system metadata`,
@@ -2295,6 +2361,7 @@ type Options struct {
Versions bool `config:"versions"`
VersionAt fs.Time `config:"version_at"`
Decompress bool `config:"decompress"`
MightGzip fs.Tristate `config:"might_gzip"`
NoSystemMetadata bool `config:"no_system_metadata"`
}
@@ -2655,10 +2722,12 @@ func setQuirks(opt *Options) {
virtualHostStyle = true
urlEncodeListings = true
useMultipartEtag = true
mightGzip = true // assume all providers might gzip until proven otherwise
)
switch opt.Provider {
case "AWS":
// No quirks
mightGzip = false // Never auto gzips objects
case "Alibaba":
useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
case "HuaweiOBS":
@@ -2772,6 +2841,12 @@ func setQuirks(opt *Options) {
opt.UseMultipartEtag.Valid = true
opt.UseMultipartEtag.Value = useMultipartEtag
}
// set MightGzip if not manually set
if !opt.MightGzip.Valid {
opt.MightGzip.Valid = true
opt.MightGzip.Value = mightGzip
}
}
// setRoot changes the root of the Fs
@@ -2780,6 +2855,14 @@ func (f *Fs) setRoot(root string) {
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// return a pointer to the string if non empty or nil if it is empty
func stringPointerOrNil(s string) *string {
if s == "" {
return nil
}
return &s
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -2799,9 +2882,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.Versions && opt.VersionAt.IsSet() {
return nil, errors.New("s3: cant use --s3-versions and --s3-version-at at the same time")
}
if opt.ACL == "" {
opt.ACL = "private"
}
if opt.BucketACL == "" {
opt.BucketACL = opt.ACL
}
@@ -2889,7 +2969,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f, fs.ErrorIsFile
}
if opt.Provider == "Storj" {
f.features.Copy = nil
f.features.SetTier = false
f.features.GetTier = false
}
@@ -2992,19 +3071,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Gets the bucket location
func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, error) {
req := s3.GetBucketLocationInput{
Bucket: &bucket,
}
var resp *s3.GetBucketLocationOutput
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.GetBucketLocation(&req)
return f.shouldRetry(ctx, err)
region, err := s3manager.GetBucketRegion(ctx, f.ses, bucket, "", func(r *request.Request) {
r.Config.S3ForcePathStyle = aws.Bool(f.opt.ForcePathStyle)
})
if err != nil {
return "", err
}
return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
return region, nil
}
// Updates the region for the bucket by reading the region from the
@@ -3118,8 +3191,11 @@ func (f *Fs) newV2List(req *s3.ListObjectsV2Input) bucketLister {
// Do a V2 listing
func (ls *v2List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versionIDs []*string, err error) {
resp, err = ls.f.c.ListObjectsV2WithContext(ctx, &ls.req)
if err != nil {
return nil, nil, err
}
ls.req.ContinuationToken = resp.NextContinuationToken
return resp, nil, err
return resp, nil, nil
}
// URL Encode the listings
@@ -3676,7 +3752,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
return f.cache.Create(bucket, func() error {
req := s3.CreateBucketInput{
Bucket: &bucket,
ACL: &f.opt.BucketACL,
ACL: stringPointerOrNil(f.opt.BucketACL),
}
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
@@ -3741,7 +3817,7 @@ func pathEscape(s string) string {
// method
func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, src *Object) error {
req.Bucket = &dstBucket
req.ACL = &f.opt.ACL
req.ACL = stringPointerOrNil(f.opt.ACL)
req.Key = &dstPath
source := pathEscape(path.Join(srcBucket, srcPath))
if src.versionID != nil {
@@ -4719,23 +4795,12 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
return nil, err
}
contentLength := &resp.ContentLength
if resp.Header.Get("Content-Range") != "" {
var contentRange = resp.Header.Get("Content-Range")
slash := strings.IndexRune(contentRange, '/')
if slash >= 0 {
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
if err == nil {
contentLength = &i
} else {
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
}
} else {
fs.Debugf(o, "Failed to find length in %q", contentRange)
}
contentLength := rest.ParseSizeFromHeaders(resp.Header)
if contentLength < 0 {
fs.Debugf(o, "Failed to parse file size from headers")
}
lastModified, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
lastModified, err := http.ParseTime(resp.Header.Get("Last-Modified"))
if err != nil {
fs.Debugf(o, "Failed to parse last modified from string %s, %v", resp.Header.Get("Last-Modified"), err)
}
@@ -4759,7 +4824,7 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
var head = s3.HeadObjectOutput{
ETag: header("Etag"),
ContentLength: contentLength,
ContentLength: &contentLength,
LastModified: &lastModified,
Metadata: metaData,
CacheControl: header("Cache-Control"),
@@ -4858,7 +4923,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Decompress body if necessary
if aws.StringValue(resp.ContentEncoding) == "gzip" {
if o.fs.opt.Decompress {
if o.fs.opt.Decompress || (resp.ContentLength == nil && o.fs.opt.MightGzip.Value) {
return readers.NewGzipReader(resp.Body)
}
o.fs.warnCompressed.Do(func() {
@@ -5105,7 +5170,7 @@ func (o *Object) uploadSinglepartPutObject(ctx context.Context, req *s3.PutObjec
// Can't upload zero length files like this for some reason
r.Body = bytes.NewReader([]byte{})
} else {
r.SetStreamingBody(ioutil.NopCloser(in))
r.SetStreamingBody(io.NopCloser(in))
}
r.SetContext(ctx)
r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
@@ -5219,7 +5284,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
req := s3.PutObjectInput{
Bucket: &bucket,
ACL: &o.fs.opt.ACL,
ACL: stringPointerOrNil(o.fs.opt.ACL),
Key: &bucketPath,
}
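Several hunks above replace `&f.opt.ACL` with `stringPointerOrNil(...)`. The helper's body does not appear in this compare view; a plausible minimal implementation, assuming its purpose is to omit the ACL header entirely when the option is unset rather than send an empty string:

    // Assumed shape of the helper (not shown in the diff): return nil
    // for "" so the SDK drops the x-amz-acl header instead of sending
    // an empty value.
    func stringPointerOrNil(s string) *string {
        if s == "" {
            return nil
        }
        return &s
    }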

View File

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -633,7 +632,7 @@ func (f *Fs) download(ctx context.Context, url string, size int64, options ...fs
})
if start > 0 {
// We need to read and discard the beginning of the data...
_, err = io.CopyN(ioutil.Discard, resp.Body, start)
_, err = io.CopyN(io.Discard, resp.Body, start)
if err != nil {
return nil, err
}
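This file and many of the hunks below are part of a mechanical migration off `io/ioutil`, which has been deprecated since Go 1.16. The replacements used throughout map one-to-one:

    // ioutil.ReadAll   -> io.ReadAll
    // ioutil.Discard   -> io.Discard
    // ioutil.NopCloser -> io.NopCloser
    // ioutil.ReadFile  -> os.ReadFile
    // ioutil.WriteFile -> os.WriteFile
    // ioutil.TempFile  -> os.CreateTemp
    // ioutil.ReadDir   -> os.ReadDir
    //
    // The last one is not quite drop-in: os.ReadDir returns []fs.DirEntry
    // rather than []os.FileInfo, which is why a later hunk swaps
    // entry.Mode() for entry.Type().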

View File

@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"regexp"
@@ -123,7 +122,10 @@ This enables the use of the following insecure ciphers and key exchange methods:
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group-exchange-sha1
Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.`,
Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.
This must be false if you use either ciphers or key_exchange advanced options.
`,
Default: false,
Examples: []fs.OptionExample{
{
@@ -325,6 +327,46 @@ and pass variables with spaces in quotes, eg
"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere
`,
Advanced: true,
}, {
Name: "ciphers",
Default: fs.SpaceSepList{},
Help: `Space separated list of ciphers to be used for session encryption, ordered by preference.
At least one must match with server configuration. This can be checked for example using ssh -Q cipher.
This must not be set if use_insecure_cipher is true.
Example:
aes128-ctr aes192-ctr aes256-ctr aes128-gcm@openssh.com aes256-gcm@openssh.com
`,
Advanced: true,
}, {
Name: "key_exchange",
Default: fs.SpaceSepList{},
Help: `Space separated list of key exchange algorithms, ordered by preference.
At least one must match with server configuration. This can be checked for example using ssh -Q kex.
This must not be set if use_insecure_cipher is true.
Example:
sntrup761x25519-sha512@openssh.com curve25519-sha256 curve25519-sha256@libssh.org ecdh-sha2-nistp256
`,
Advanced: true,
}, {
Name: "macs",
Default: fs.SpaceSepList{},
Help: `Space separated list of MACs (message authentication code) algorithms, ordered by preference.
At least one must match with server configuration. This can be checked for example using ssh -Q mac.
Example:
umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com
`,
Advanced: true,
}},
@@ -362,6 +404,9 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Concurrency int `config:"concurrency"`
SetEnv fs.SpaceSepList `config:"set_env"`
Ciphers fs.SpaceSepList `config:"ciphers"`
KeyExchange fs.SpaceSepList `config:"key_exchange"`
MACs fs.SpaceSepList `config:"macs"`
}
// Fs stores the interface to the remote SFTP files
@@ -702,10 +747,25 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
sshConfig.HostKeyCallback = hostcallback
}
if opt.UseInsecureCipher && (opt.Ciphers != nil || opt.KeyExchange != nil) {
return nil, fmt.Errorf("use_insecure_cipher must be false if ciphers or key_exchange are set in advanced configuration")
}
sshConfig.Config.SetDefaults()
if opt.UseInsecureCipher {
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc", "aes192-cbc", "aes256-cbc", "3des-cbc")
sshConfig.Config.KeyExchanges = append(sshConfig.Config.KeyExchanges, "diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256")
} else {
if opt.Ciphers != nil {
sshConfig.Config.Ciphers = opt.Ciphers
}
if opt.KeyExchange != nil {
sshConfig.Config.KeyExchanges = opt.KeyExchange
}
}
if opt.MACs != nil {
sshConfig.Config.MACs = opt.MACs
}
keyFile := env.ShellExpand(opt.KeyFile)
@@ -722,7 +782,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err)
}
if keyFile != "" {
pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
pubBytes, err := os.ReadFile(keyFile + ".pub")
if err != nil {
return nil, fmt.Errorf("failed to read public key file: %w", err)
}
@@ -751,7 +811,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if keyFile != "" || opt.KeyPem != "" {
var key []byte
if opt.KeyPem == "" {
key, err = ioutil.ReadFile(keyFile)
key, err = os.ReadFile(keyFile)
if err != nil {
return nil, fmt.Errorf("failed to read private key file: %w", err)
}
@@ -782,7 +842,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// If a public key has been specified then use that
if pubkeyFile != "" {
certfile, err := ioutil.ReadFile(pubkeyFile)
certfile, err := os.ReadFile(pubkeyFile)
if err != nil {
return nil, fmt.Errorf("unable to read cert file: %w", err)
}
@@ -915,20 +975,24 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
fs.Debugf(f, "Running shell type detection remote command: %s", shellCmd)
err = session.Run(shellCmd)
_ = session.Close()
f.shellType = defaultShellType
if err != nil {
f.shellType = defaultShellType
fs.Debugf(f, "Remote command failed: %v (stdout=%v) (stderr=%v)", err, bytes.TrimSpace(stdout.Bytes()), bytes.TrimSpace(stderr.Bytes()))
} else {
outBytes := stdout.Bytes()
fs.Debugf(f, "Remote command result: %s", outBytes)
outString := string(bytes.TrimSpace(stdout.Bytes()))
if strings.HasPrefix(outString, "Microsoft.PowerShell") { // If PowerShell: "Microsoft.PowerShell%ComSpec%"
f.shellType = "powershell"
} else if !strings.HasSuffix(outString, "%ComSpec%") { // If Command Prompt: "${ShellId}C:\WINDOWS\system32\cmd.exe"
f.shellType = "cmd"
} else { // If Unix: "%ComSpec%"
f.shellType = "unix"
}
if outString != "" {
if strings.HasPrefix(outString, "Microsoft.PowerShell") { // PowerShell: "Microsoft.PowerShell%ComSpec%"
f.shellType = "powershell"
} else if !strings.HasSuffix(outString, "%ComSpec%") { // Command Prompt: "${ShellId}C:\WINDOWS\system32\cmd.exe"
// Additional positive test, to avoid misdetection on unpredicted Unix shell variants
s := strings.ToLower(outString)
if strings.Contains(s, ".exe") || strings.Contains(s, ".com") {
f.shellType = "cmd"
}
} // POSIX-based Unix shell: "%ComSpec%"
} // fish Unix shell: ""
}
}
// Save permanently in config to avoid the extra work next time

View File

@@ -77,7 +77,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -479,7 +478,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed to open timezone db: %w", err)
}
tzdata, err := ioutil.ReadAll(timezone)
tzdata, err := io.ReadAll(timezone)
if err != nil {
return nil, fmt.Errorf("failed to read timezone: %w", err)
}

View File

@@ -10,7 +10,6 @@ import (
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
pathpkg "path"
@@ -119,7 +118,7 @@ func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) {
}
if f.grPos < f.seekPos {
// Fast-forward.
_, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos)
_, err = io.CopyN(io.Discard, f.gr, f.seekPos-f.grPos)
if err != nil {
return 0, err
}
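Gzip streams are not seekable, so this generated filesystem emulates a forward seek by reading and discarding the gap; only the spelling of the discard sink changes here. The pattern in isolation, as a sketch:

    package vfsutil // illustrative package name, not part of the diff

    import (
        "errors"
        "io"
    )

    // skipForward advances a non-seekable reader (such as a gzip.Reader)
    // from offset cur to offset target by discarding the bytes between.
    func skipForward(r io.Reader, cur, target int64) (int64, error) {
        if target < cur {
            return cur, errors.New("cannot seek backwards in a compressed stream")
        }
        n, err := io.CopyN(io.Discard, r, target-cur)
        return cur + n, err
    }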

View File

@@ -103,6 +103,10 @@ func (f *Fs) getSessions() int32 {
// Open a new connection to the SMB server.
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
// As we are pooling these connections we need to decouple
// them from the current context
ctx = context.Background()
c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
if err != nil {
return nil, fmt.Errorf("couldn't connect SMB: %w", err)

View File

@@ -2,7 +2,7 @@ package sugarsync
import (
"bytes"
"io/ioutil"
"io"
"net/http"
"testing"
@@ -48,7 +48,7 @@ func TestErrorHandler(t *testing.T) {
} {
t.Run(test.name, func(t *testing.T) {
resp := http.Response{
Body: ioutil.NopCloser(bytes.NewBufferString(test.body)),
Body: io.NopCloser(bytes.NewBufferString(test.body)),
StatusCode: test.code,
Status: test.status,
}

View File

@@ -6,7 +6,6 @@ import (
"context"
"errors"
"io"
"io/ioutil"
"testing"
"github.com/ncw/swift/v2"
@@ -136,7 +135,7 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
buf := bytes.NewBufferString(contents[:errPosition])
errMessage := "potato"
er := &readers.ErrorReader{Err: errors.New(errMessage)}
in := ioutil.NopCloser(io.MultiReader(buf, er))
in := io.NopCloser(io.MultiReader(buf, er))
file.Size = contentSize
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)

View File

@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"sync"
"time"
@@ -87,7 +86,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
if len(entries) > 1 {
// Drain the input buffer to allow other uploads to continue
_, _ = io.Copy(ioutil.Discard, readers[i])
_, _ = io.Copy(io.Discard, readers[i])
}
}
} else {

View File

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"path"
"path/filepath"
"strings"
@@ -501,7 +500,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
if len(upstreams) > 1 {
// Drain the input buffer to allow other uploads to continue
_, _ = io.Copy(ioutil.Discard, readers[i])
_, _ = io.Copy(io.Discard, readers[i])
}
return
}

View File

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -239,7 +238,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
func (f *Fs) decodeError(resp *http.Response, response interface{}) (err error) {
defer fs.CheckClose(resp.Body, &err)
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}

View File

@@ -991,6 +991,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
}
return nil, fs.ErrorCantMove
}
srcFs := srcObj.fs
dstPath := f.filePath(remote)
err := f.mkParentDir(ctx, dstPath)
if err != nil {
@@ -1013,9 +1014,10 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
if f.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
// Direct the MOVE/COPY to the source server
err = srcFs.pacer.Call(func() (bool, error) {
resp, err = srcFs.srv.Call(ctx, &opts)
return srcFs.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("Copy call failed: %w", err)
@@ -1109,9 +1111,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
"Overwrite": "F",
},
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
// Direct the MOVE/COPY to the source server
err = srcFs.pacer.Call(func() (bool, error) {
resp, err = srcFs.srv.Call(ctx, &opts)
return srcFs.shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("DirMove MOVE call failed: %w", err)

View File

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -1219,7 +1218,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if partialContent && resp.StatusCode == 200 {
if start > 0 {
// We need to read and discard the beginning of the data...
_, err = io.CopyN(ioutil.Discard, resp.Body, start)
_, err = io.CopyN(io.Discard, resp.Body, start)
if err != nil {
if resp != nil {
_ = resp.Body.Close()

View File

@@ -9,7 +9,6 @@ import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
@@ -240,7 +239,7 @@ func buildWindowsResourceSyso(goarch string, versionTag string) string {
log.Printf("Failed to resolve path: %v", err)
return ""
}
err = ioutil.WriteFile(jsonPath, bs, 0644)
err = os.WriteFile(jsonPath, bs, 0644)
if err != nil {
log.Printf("Failed to write %s: %v", jsonPath, err)
return ""
@@ -476,7 +475,7 @@ func main() {
run("mkdir", "build")
}
chdir("build")
err := ioutil.WriteFile("version.txt", []byte(fmt.Sprintf("rclone %s\n", version)), 0666)
err := os.WriteFile("version.txt", []byte(fmt.Sprintf("rclone %s\n", version)), 0666)
if err != nil {
log.Fatalf("Couldn't write version.txt: %v", err)
}

View File

@@ -16,7 +16,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
@@ -168,7 +167,7 @@ func defaultBinDir() string {
// read the body or an error message
func readBody(in io.Reader) string {
data, err := ioutil.ReadAll(in)
data, err := io.ReadAll(in)
if err != nil {
return fmt.Sprintf("Error reading body: %v", err.Error())
}

View File

@@ -5,7 +5,6 @@ import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
@@ -56,7 +55,7 @@ func main() {
log.Fatalf("Syntax: %s", os.Args[0])
}
// v1.54.0
versionBytes, err := ioutil.ReadFile("VERSION")
versionBytes, err := os.ReadFile("VERSION")
if err != nil {
log.Fatalf("Failed to read version: %v", err)
}

View File

@@ -5,7 +5,6 @@ package bilib
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
@@ -106,7 +105,7 @@ func CopyDir(src string, dst string) (err error) {
return
}
entries, err := ioutil.ReadDir(src)
entries, err := os.ReadDir(src)
if err != nil {
return
}
@@ -122,7 +121,7 @@ func CopyDir(src string, dst string) (err error) {
}
} else {
// Skip symlinks.
if entry.Mode()&os.ModeSymlink != 0 {
if entry.Type()&os.ModeSymlink != 0 {
continue
}

View File

@@ -2,7 +2,7 @@ package bilib
import (
"bytes"
"io/ioutil"
"os"
"sort"
"strconv"
)
@@ -57,5 +57,5 @@ func SaveList(list []string, path string) error {
_, _ = buf.WriteString(strconv.Quote(s))
_ = buf.WriteByte('\n')
}
return ioutil.WriteFile(path, buf.Bytes(), PermSecure)
return os.WriteFile(path, buf.Bytes(), PermSecure)
}

View File

@@ -10,7 +10,6 @@ import (
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path"
@@ -303,7 +302,7 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
// Execute test scenario
scenFile := filepath.Join(b.testDir, "scenario.txt")
scenBuf, err := ioutil.ReadFile(scenFile)
scenBuf, err := os.ReadFile(scenFile)
scenReplacer := b.newReplacer(false)
require.NoError(b.t, err)
b.step = 0
@@ -903,8 +902,8 @@ func (b *bisyncTest) compareResults() int {
// save mangled logs so difference is easier on eyes
goldenFile := filepath.Join(b.logDir, "mangled.golden.log")
resultFile := filepath.Join(b.logDir, "mangled.result.log")
require.NoError(b.t, ioutil.WriteFile(goldenFile, []byte(goldenText), bilib.PermSecure))
require.NoError(b.t, ioutil.WriteFile(resultFile, []byte(resultText), bilib.PermSecure))
require.NoError(b.t, os.WriteFile(goldenFile, []byte(goldenText), bilib.PermSecure))
require.NoError(b.t, os.WriteFile(resultFile, []byte(resultText), bilib.PermSecure))
}
if goldenText == resultText {
@@ -974,7 +973,7 @@ func (b *bisyncTest) storeGolden() {
goldName := b.toGolden(fileName)
goldPath := filepath.Join(b.goldenDir, goldName)
err := ioutil.WriteFile(goldPath, []byte(text), bilib.PermSecure)
err := os.WriteFile(goldPath, []byte(text), bilib.PermSecure)
assert.NoError(b.t, err, "writing golden file %s", goldName)
if goldName != fileName {
@@ -986,7 +985,7 @@ func (b *bisyncTest) storeGolden() {
// mangleResult prepares test logs or listings for comparison
func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
buf, err := ioutil.ReadFile(filepath.Join(dir, file))
buf, err := os.ReadFile(filepath.Join(dir, file))
require.NoError(b.t, err)
text := string(buf)
@@ -1205,7 +1204,7 @@ func (b *bisyncTest) ensureDir(parent, dir string, optional bool) string {
}
func (b *bisyncTest) listDir(dir string) (names []string) {
files, err := ioutil.ReadDir(dir)
files, err := os.ReadDir(dir)
require.NoError(b.t, err)
for _, file := range files {
names = append(names, filepath.Base(file.Name()))

View File

@@ -9,7 +9,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -198,7 +197,7 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
_ = f.Close()
hashFile := filtersFile + ".md5"
wantHash, err := ioutil.ReadFile(hashFile)
wantHash, err := os.ReadFile(hashFile)
if err != nil && !opt.Resync {
return ctx, fmt.Errorf("filters file md5 hash not found (must run --resync): %s", filtersFile)
}
@@ -209,7 +208,7 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
if opt.Resync {
fs.Infof(nil, "Storing filters file hash to %s", hashFile)
if err := ioutil.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
return ctx, err
}
}

View File

@@ -7,7 +7,6 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -81,7 +80,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
}
pidStr := []byte(strconv.Itoa(os.Getpid()))
if err = ioutil.WriteFile(lockFile, pidStr, bilib.PermSecure); err != nil {
if err = os.WriteFile(lockFile, pidStr, bilib.PermSecure); err != nil {
return fmt.Errorf("cannot create lock file: %s: %w", lockFile, err)
}
fs.Debugf(nil, "Lock file created: %s", lockFile)

View File

@@ -4,7 +4,6 @@ package cat
import (
"context"
"io"
"io/ioutil"
"log"
"os"
"strings"
@@ -77,7 +76,7 @@ Note that if offset is negative it will count from the end, so
fsrc := cmd.NewFsSrc(args)
var w io.Writer = os.Stdout
if discard {
w = ioutil.Discard
w = io.Discard
}
cmd.Run(false, false, command, func() error {
return operations.Cat(context.Background(), fsrc, w, offset, count)

View File

@@ -1,7 +1,6 @@
package genautocomplete
import (
"io/ioutil"
"os"
"testing"
@@ -9,7 +8,7 @@ import (
)
func TestCompletionBash(t *testing.T) {
tempFile, err := ioutil.TempFile("", "completion_bash")
tempFile, err := os.CreateTemp("", "completion_bash")
assert.NoError(t, err)
defer func() {
_ = tempFile.Close()
@@ -18,14 +17,14 @@ func TestCompletionBash(t *testing.T) {
bashCommandDefinition.Run(bashCommandDefinition, []string{tempFile.Name()})
bs, err := ioutil.ReadFile(tempFile.Name())
bs, err := os.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(bs))
}
func TestCompletionBashStdout(t *testing.T) {
originalStdout := os.Stdout
tempFile, err := ioutil.TempFile("", "completion_zsh")
tempFile, err := os.CreateTemp("", "completion_zsh")
assert.NoError(t, err)
defer func() {
_ = tempFile.Close()
@@ -37,13 +36,13 @@ func TestCompletionBashStdout(t *testing.T) {
bashCommandDefinition.Run(bashCommandDefinition, []string{"-"})
output, err := ioutil.ReadFile(tempFile.Name())
output, err := os.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(output))
}
func TestCompletionZsh(t *testing.T) {
tempFile, err := ioutil.TempFile("", "completion_zsh")
tempFile, err := os.CreateTemp("", "completion_zsh")
assert.NoError(t, err)
defer func() {
_ = tempFile.Close()
@@ -52,14 +51,14 @@ func TestCompletionZsh(t *testing.T) {
zshCommandDefinition.Run(zshCommandDefinition, []string{tempFile.Name()})
bs, err := ioutil.ReadFile(tempFile.Name())
bs, err := os.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(bs))
}
func TestCompletionZshStdout(t *testing.T) {
originalStdout := os.Stdout
tempFile, err := ioutil.TempFile("", "completion_zsh")
tempFile, err := os.CreateTemp("", "completion_zsh")
assert.NoError(t, err)
defer func() {
_ = tempFile.Close()
@@ -70,13 +69,13 @@ func TestCompletionZshStdout(t *testing.T) {
defer func() { os.Stdout = originalStdout }()
zshCommandDefinition.Run(zshCommandDefinition, []string{"-"})
output, err := ioutil.ReadFile(tempFile.Name())
output, err := os.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(output))
}
func TestCompletionFish(t *testing.T) {
tempFile, err := ioutil.TempFile("", "completion_fish")
tempFile, err := os.CreateTemp("", "completion_fish")
assert.NoError(t, err)
defer func() {
_ = tempFile.Close()
@@ -85,14 +84,14 @@ func TestCompletionFish(t *testing.T) {
fishCommandDefinition.Run(fishCommandDefinition, []string{tempFile.Name()})
bs, err := ioutil.ReadFile(tempFile.Name())
bs, err := os.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(bs))
}
func TestCompletionFishStdout(t *testing.T) {
originalStdout := os.Stdout
tempFile, err := ioutil.TempFile("", "completion_zsh")
tempFile, err := os.CreateTemp("", "completion_zsh")
assert.NoError(t, err)
defer func() {
_ = tempFile.Close()
@@ -104,7 +103,7 @@ func TestCompletionFishStdout(t *testing.T) {
fishCommandDefinition.Run(fishCommandDefinition, []string{"-"})
output, err := ioutil.ReadFile(tempFile.Name())
output, err := os.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(output))
}

View File

@@ -3,7 +3,6 @@ package gendocs
import (
"bytes"
"io/ioutil"
"log"
"os"
"path"
@@ -71,7 +70,7 @@ rclone.org website.`,
if err != nil {
return err
}
err = ioutil.WriteFile(filepath.Join(root, "flags.md"), buf.Bytes(), 0777)
err = os.WriteFile(filepath.Join(root, "flags.md"), buf.Bytes(), 0777)
if err != nil {
return err
}
@@ -129,7 +128,7 @@ rclone.org website.`,
return err
}
if !info.IsDir() {
b, err := ioutil.ReadFile(path)
b, err := os.ReadFile(path)
if err != nil {
return err
}
@@ -140,7 +139,7 @@ See the [global flags page](/flags/) for global options not listed here.
### SEE ALSO`, 1)
// outdent all the titles by one
doc = outdentTitle.ReplaceAllString(doc, `$1`)
err = ioutil.WriteFile(path, []byte(doc), 0777)
err = os.WriteFile(path, []byte(doc), 0777)
if err != nil {
return err
}

View File

@@ -8,7 +8,6 @@ import (
"bytes"
"flag"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
@@ -60,11 +59,11 @@ func randomSeekTest(size int64, in1, in2 *os.File, file1, file2 string) {
if !bytes.Equal(buf1, buf2) {
log.Printf("Dumping different blocks")
err = ioutil.WriteFile("/tmp/z1", buf1, 0777)
err = os.WriteFile("/tmp/z1", buf1, 0777)
if err != nil {
log.Fatalf("Failed to write /tmp/z1: %v", err)
}
err = ioutil.WriteFile("/tmp/z2", buf2, 0777)
err = os.WriteFile("/tmp/z2", buf2, 0777)
if err != nil {
log.Fatalf("Failed to write /tmp/z2: %v", err)
}

View File

@@ -2,7 +2,6 @@ package mountlib_test
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -36,7 +35,7 @@ func TestRc(t *testing.T) {
assert.NotNil(t, getMountTypes)
localDir := t.TempDir()
err := ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
err := os.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
require.NoError(t, err)
mountPoint := t.TempDir()

View File

@@ -89,11 +89,12 @@ func helpText() (tr []string) {
" ↑,↓ or k,j to Move",
" →,l to enter",
" ←,h to return",
" c toggle counts",
" g toggle graph",
" c toggle counts",
" a toggle average size in directory",
" m toggle modified time",
" u toggle human-readable format",
" n,s,C,A sort by name,size,count,average size",
" n,s,C,A,M sort by name,size,count,asize,mtime",
" d delete file/directory",
" v select file/directory",
" V enter visual select mode",
@@ -131,12 +132,14 @@ type UI struct {
showGraph bool // toggle showing graph
showCounts bool // toggle showing counts
showDirAverageSize bool // toggle average size
showModTime bool // toggle showing timestamps
humanReadable bool // toggle human-readable format
visualSelectMode bool // toggle visual selection mode
sortByName int8 // +1 for normal, 0 for off, -1 for reverse
sortBySize int8
sortByName int8 // +1 for normal (lexical), 0 for off, -1 for reverse
sortBySize int8 // +1 for normal (largest first), 0 for off, -1 for reverse (smallest first)
sortByCount int8
sortByAverageSize int8
sortByModTime int8 // +1 for normal (newest first), 0 for off, -1 for reverse (oldest first)
dirPosMap map[string]dirPos // store for directory positions
selectedEntries map[string]dirPos // selected entries of current directory
}
@@ -332,6 +335,7 @@ func (u *UI) hasEmptyDir() bool {
// Draw the current screen
func (u *UI) Draw() error {
ctx := context.Background()
w, h := termbox.Size()
u.dirListHeight = h - 3
@@ -365,7 +369,13 @@ func (u *UI) Draw() error {
if y >= h-1 {
break
}
attrs, err := u.d.AttrI(u.sortPerm[n])
var attrs scan.Attrs
var err error
if u.showModTime {
attrs, err = u.d.AttrWithModTimeI(ctx, u.sortPerm[n])
} else {
attrs, err = u.d.AttrI(u.sortPerm[n])
}
_, isSelected := u.selectedEntries[entry.String()]
fg := termbox.ColorWhite
if attrs.EntriesHaveErrors {
@@ -421,6 +431,9 @@ func (u *UI) Draw() error {
extras += strings.Repeat(" ", len(ss))
}
}
if u.showModTime {
extras += attrs.ModTime.Local().Format("2006-01-02 15:04:05") + " "
}
if showEmptyDir {
if attrs.IsDir && attrs.Count == 0 && fileFlag == ' ' {
fileFlag = 'e'
@@ -656,8 +669,15 @@ type ncduSort struct {
// Less is part of sort.Interface.
func (ds *ncduSort) Less(i, j int) bool {
var iAvgSize, jAvgSize float64
iattrs, _ := ds.d.AttrI(ds.sortPerm[i])
jattrs, _ := ds.d.AttrI(ds.sortPerm[j])
var iattrs, jattrs scan.Attrs
if ds.u.sortByModTime != 0 {
ctx := context.Background()
iattrs, _ = ds.d.AttrWithModTimeI(ctx, ds.sortPerm[i])
jattrs, _ = ds.d.AttrWithModTimeI(ctx, ds.sortPerm[j])
} else {
iattrs, _ = ds.d.AttrI(ds.sortPerm[i])
jattrs, _ = ds.d.AttrI(ds.sortPerm[j])
}
iname, jname := ds.entries[ds.sortPerm[i]].Remote(), ds.entries[ds.sortPerm[j]].Remote()
if iattrs.Count > 0 {
iAvgSize = iattrs.AverageSize()
@@ -679,6 +699,14 @@ func (ds *ncduSort) Less(i, j int) bool {
if iattrs.Size != jattrs.Size {
return iattrs.Size > jattrs.Size
}
case ds.u.sortByModTime < 0:
if iattrs.ModTime != jattrs.ModTime {
return iattrs.ModTime.Before(jattrs.ModTime)
}
case ds.u.sortByModTime > 0:
if iattrs.ModTime != jattrs.ModTime {
return iattrs.ModTime.After(jattrs.ModTime)
}
case ds.u.sortByCount < 0:
if iattrs.Count != jattrs.Count {
return iattrs.Count < jattrs.Count
@@ -692,13 +720,17 @@ func (ds *ncduSort) Less(i, j int) bool {
return iAvgSize < jAvgSize
}
// if avgSize is equal, sort by size
return iattrs.Size < jattrs.Size
if iattrs.Size != jattrs.Size {
return iattrs.Size < jattrs.Size
}
case ds.u.sortByAverageSize > 0:
if iAvgSize != jAvgSize {
return iAvgSize > jAvgSize
}
// if avgSize is equal, sort by size
return iattrs.Size > jattrs.Size
if iattrs.Size != jattrs.Size {
return iattrs.Size > jattrs.Size
}
}
// if everything equal, sort by name
return iname < jname
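The reshuffled cases turn the average-size ordering into a proper multi-key comparison: average size first, then total size, then name, with each key consulted only when the previous one ties. The general shape of such a comparator, with a hypothetical item type standing in for the real entry:

    // item is a hypothetical stand-in for the real directory entry.
    type item struct {
        name    string
        size    int64
        avgSize float64
    }

    // less falls through to the next sort key only when the current
    // one ties: average size, then size, then name.
    func less(a, b item) bool {
        if a.avgSize != b.avgSize {
            return a.avgSize > b.avgSize
        }
        if a.size != b.size {
            return a.size > b.size
        }
        return a.name < b.name
    }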
@@ -843,8 +875,9 @@ func NewUI(f fs.Fs) *UI {
showCounts: false,
showDirAverageSize: false,
humanReadable: true,
sortByName: 0, // +1 for normal, 0 for off, -1 for reverse
sortBySize: 1,
sortByName: 0,
sortBySize: 1, // Sort by largest first
sortByModTime: 0,
sortByCount: 0,
dirPosMap: make(map[string]dirPos),
selectedEntries: make(map[string]dirPos),
@@ -933,6 +966,8 @@ outer:
u.enter()
case 'c':
u.showCounts = !u.showCounts
case 'm':
u.showModTime = !u.showModTime
case 'g':
u.showGraph = !u.showGraph
case 'a':
@@ -941,6 +976,8 @@ outer:
u.toggleSort(&u.sortByName)
case 's':
u.toggleSort(&u.sortBySize)
case 'M':
u.toggleSort(&u.sortByModTime)
case 'v':
u.toggleSelectForCursor()
case 'V':

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"path"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
@@ -31,6 +32,7 @@ type Dir struct {
// in the total count. They are not included in the size, i.e. treated
// as empty files, which means the size may be underestimated.
type Attrs struct {
ModTime time.Time
Size int64
Count int64
CountUnknownSize int64
@@ -193,20 +195,33 @@ func (d *Dir) Attr() (size int64, count int64) {
return d.size, d.count
}
// attrI returns the size, count and flags for the i-th directory entry
func (d *Dir) attrI(i int) (attrs Attrs, err error) {
subDir, isDir := d.getDir(i)
if !isDir {
return Attrs{time.Time{}, d.entries[i].Size(), 0, 0, false, true, d.entriesHaveErrors}, d.readError
}
if subDir == nil {
return Attrs{time.Time{}, 0, 0, 0, true, false, false}, nil
}
size, count := subDir.Attr()
return Attrs{time.Time{}, size, count, subDir.countUnknownSize, true, true, subDir.entriesHaveErrors}, subDir.readError
}
// AttrI returns the size, count and flags for the i-th directory entry
func (d *Dir) AttrI(i int) (attrs Attrs, err error) {
d.mu.Lock()
defer d.mu.Unlock()
subDir, isDir := d.getDir(i)
return d.attrI(i)
}
if !isDir {
return Attrs{d.entries[i].Size(), 0, 0, false, true, d.entriesHaveErrors}, d.readError
}
if subDir == nil {
return Attrs{0, 0, 0, true, false, false}, nil
}
size, count := subDir.Attr()
return Attrs{size, count, subDir.countUnknownSize, true, true, subDir.entriesHaveErrors}, subDir.readError
// AttrWithModTimeI returns the modtime, size, count and flags for the i-th directory entry
func (d *Dir) AttrWithModTimeI(ctx context.Context, i int) (attrs Attrs, err error) {
d.mu.Lock()
defer d.mu.Unlock()
attrs, err = d.attrI(i)
attrs.ModTime = d.entries[i].ModTime(ctx)
return
}
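One side effect visible above: `Attrs` is constructed with positional composite literals, so putting `ModTime` first forces a `time.Time{}` placeholder into every return. Field-keyed literals avoid that churn at the cost of a little verbosity; a sketch of the equivalent, not the rclone code:

    // Omitted fields take their zero value, so ModTime stays the zero
    // time.Time until AttrWithModTimeI fills it in.
    attrs := Attrs{
        Size:  size,
        Count: count,
        // ... remaining fields as needed
    }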
// Scan the Fs passed in, returning a root directory channel and an

View File

@@ -7,7 +7,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
"os"
"strings"
@@ -204,7 +204,7 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err
if resp.StatusCode != http.StatusOK {
var body []byte
body, err = ioutil.ReadAll(resp.Body)
body, err = io.ReadAll(resp.Body)
var bodyString string
if err == nil {
bodyString = string(body)

View File

@@ -66,7 +66,7 @@ a lot of data, you're better off caching locally and then
fdst, dstFileName := cmd.NewFsDstFile(args)
cmd.Run(false, false, command, func() error {
_, err := operations.RcatSize(context.Background(), fdst, dstFileName, os.Stdin, size, time.Now())
_, err := operations.RcatSize(context.Background(), fdst, dstFileName, os.Stdin, size, time.Now(), nil)
return err
})
},

View File

@@ -14,7 +14,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
@@ -227,7 +226,7 @@ func InstallUpdate(ctx context.Context, opt *Options) error {
}
func installPackage(ctx context.Context, beta bool, version, siteURL, packageFormat string) error {
tempFile, err := ioutil.TempFile("", "rclone.*."+packageFormat)
tempFile, err := os.CreateTemp("", "rclone.*."+packageFormat)
if err != nil {
return fmt.Errorf("unable to write temporary package: %w", err)
}
@@ -357,7 +356,7 @@ func downloadUpdate(ctx context.Context, beta bool, version, siteURL, newFile, p
}
if packageFormat == "deb" || packageFormat == "rpm" {
if err := ioutil.WriteFile(newFile, archiveBuf, 0644); err != nil {
if err := os.WriteFile(newFile, archiveBuf, 0644); err != nil {
return fmt.Errorf("cannot write temporary .%s: %w", packageFormat, err)
}
return nil
@@ -471,5 +470,5 @@ func downloadFile(ctx context.Context, url string) ([]byte, error) {
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("failed with %s downloading %s", resp.Status, url)
}
return ioutil.ReadAll(resp.Body)
return io.ReadAll(resp.Body)
}

View File

@@ -5,7 +5,6 @@ package selfupdate
import (
"context"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -86,7 +85,7 @@ func TestInstallOnLinux(t *testing.T) {
assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path, Version: fs.Version}))
// Must fail on non-writable file
assert.NoError(t, ioutil.WriteFile(path, []byte("test"), 0644))
assert.NoError(t, os.WriteFile(path, []byte("test"), 0644))
assert.NoError(t, os.Chmod(path, 0000))
err = (InstallUpdate(ctx, &Options{Beta: true, Output: path}))
assert.Error(t, err)
@@ -101,7 +100,7 @@ func TestInstallOnLinux(t *testing.T) {
assert.Equal(t, os.FileMode(0644), info.Mode().Perm())
// Must remove temporary files
files, err := ioutil.ReadDir(testDir)
files, err := os.ReadDir(testDir)
assert.NoError(t, err)
assert.Equal(t, 1, len(files))
@@ -141,7 +140,7 @@ func TestRenameOnWindows(t *testing.T) {
// Must not create temporary files when target doesn't exist
assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path}))
files, err := ioutil.ReadDir(testDir)
files, err := os.ReadDir(testDir)
assert.NoError(t, err)
assert.Equal(t, 1, len(files))
@@ -152,7 +151,7 @@ func TestRenameOnWindows(t *testing.T) {
assert.NoError(t, cmdWait.Start())
assert.NoError(t, InstallUpdate(ctx, &Options{Beta: false, Output: path}))
files, err = ioutil.ReadDir(testDir)
files, err = os.ReadDir(testDir)
assert.NoError(t, err)
assert.Equal(t, 2, len(files))
@@ -189,7 +188,7 @@ func TestRenameOnWindows(t *testing.T) {
// Updating when the "old" executable is running must produce a random "old" file
assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path}))
files, err = ioutil.ReadDir(testDir)
files, err = os.ReadDir(testDir)
assert.NoError(t, err)
assert.Equal(t, 3, len(files))

View File

@@ -9,7 +9,6 @@ import (
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
pathpkg "path"
@@ -152,7 +151,7 @@ func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) {
}
if f.grPos < f.seekPos {
// Fast-forward.
_, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos)
_, err = io.CopyN(io.Discard, f.gr, f.seekPos-f.grPos)
if err != nil {
return 0, err
}

View File

@@ -6,7 +6,7 @@ package data
import (
"fmt"
"io/ioutil"
"io"
"text/template"
"github.com/rclone/rclone/fs"
@@ -21,7 +21,7 @@ func GetTemplate() (tpl *template.Template, err error) {
defer fs.CheckClose(templateFile, &err)
templateBytes, err := ioutil.ReadAll(templateFile)
templateBytes, err := io.ReadAll(templateFile)
if err != nil {
return nil, fmt.Errorf("get template read: %w", err)
}

View File

@@ -5,7 +5,7 @@ import (
"context"
"fmt"
"html"
"io/ioutil"
"io"
"net/http"
"os"
"strings"
@@ -60,7 +60,7 @@ func TestRootSCPD(t *testing.T) {
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode)
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
// Make sure that the SCPD contains a CDS service.
require.Contains(t, string(body),
@@ -80,7 +80,7 @@ func TestServeContent(t *testing.T) {
require.NoError(t, err)
defer fs.CheckClose(resp.Body, &err)
assert.Equal(t, http.StatusOK, resp.StatusCode)
actualContents, err := ioutil.ReadAll(resp.Body)
actualContents, err := io.ReadAll(resp.Body)
assert.NoError(t, err)
// Now compare the contents with the golden file.
@@ -90,7 +90,7 @@ func TestServeContent(t *testing.T) {
goldenReader, err := goldenFile.Open(os.O_RDONLY)
assert.NoError(t, err)
defer fs.CheckClose(goldenReader, &err)
goldenContents, err := ioutil.ReadAll(goldenReader)
goldenContents, err := io.ReadAll(goldenReader)
assert.NoError(t, err)
require.Equal(t, goldenContents, actualContents)
@@ -119,7 +119,7 @@ func TestContentDirectoryBrowseMetadata(t *testing.T) {
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode)
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
// should contain an appropriate URN
require.Contains(t, string(body), "urn:schemas-upnp-org:service:ContentDirectory:1")
@@ -145,7 +145,7 @@ func TestMediaReceiverRegistrarService(t *testing.T) {
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode)
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Contains(t, string(body), "<RegistrationRespMsg>")
}
@@ -173,7 +173,7 @@ func TestContentDirectoryBrowseDirectChildren(t *testing.T) {
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode)
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
// expect video.mp4, video.srt, video.en.srt URLs to be in the DIDL
require.Contains(t, string(body), "/r/video.mp4")
@@ -201,7 +201,7 @@ func TestContentDirectoryBrowseDirectChildren(t *testing.T) {
resp, err = http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode)
body, err = ioutil.ReadAll(resp.Body)
body, err = io.ReadAll(resp.Body)
require.NoError(t, err)
// expect video.mp4, video.srt, URLs to be in the DIDL
require.Contains(t, string(body), "/r/subdir/video.mp4")

View File

@@ -8,7 +8,7 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net"
"net/http"
"os"
@@ -280,7 +280,7 @@ func (a *APIClient) request(path string, in, out interface{}, wantErr bool) {
}
assert.Equal(t, wantStatus, res.StatusCode)
dataOut, err = ioutil.ReadAll(res.Body)
dataOut, err = io.ReadAll(res.Body)
require.NoError(t, err)
err = res.Body.Close()
require.NoError(t, err)
@@ -389,11 +389,11 @@ func testMountAPI(t *testing.T, sockAddr string) {
assert.Contains(t, res, "volume is in use")
text := []byte("banana")
err = ioutil.WriteFile(filepath.Join(mount1, "txt"), text, 0644)
err = os.WriteFile(filepath.Join(mount1, "txt"), text, 0644)
assert.NoError(t, err)
time.Sleep(tempDelay)
text2, err := ioutil.ReadFile(filepath.Join(path1, "txt"))
text2, err := os.ReadFile(filepath.Join(path1, "txt"))
assert.NoError(t, err)
if runtime.GOOS != "windows" {
// this check sometimes fails on windows - ignore

View File

@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
@@ -329,7 +328,7 @@ func (drv *Driver) saveState() error {
ctx := context.Background()
retries := fs.GetConfig(ctx).LowLevelRetries
for i := 0; i <= retries; i++ {
err = ioutil.WriteFile(drv.statePath, data, 0600)
err = os.WriteFile(drv.statePath, data, 0600)
if err == nil {
return nil
}
@@ -342,7 +341,7 @@ func (drv *Driver) saveState() error {
func (drv *Driver) restoreState(ctx context.Context) error {
fs.Debugf(nil, "Restore state from %s", drv.statePath)
data, err := ioutil.ReadFile(drv.statePath)
data, err := os.ReadFile(drv.statePath)
if os.IsNotExist(err) {
return nil
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
@@ -93,7 +92,7 @@ func writeSpecFile(addr, proto, specDir string) (string, error) {
}
specFile := filepath.Join(specDir, "rclone.spec")
url := fmt.Sprintf("%s://%s", proto, addr)
if err := ioutil.WriteFile(specFile, []byte(url), 0644); err != nil {
if err := os.WriteFile(specFile, []byte(url), 0644); err != nil {
return "", err
}
fs.Debugf(nil, "Plugin spec has been written to %s", specFile)

View File

@@ -10,7 +10,6 @@ import (
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
pathpkg "path"
@@ -112,7 +111,7 @@ func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) {
}
if f.grPos < f.seekPos {
// Fast-forward.
_, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos)
_, err = io.CopyN(io.Discard, f.gr, f.seekPos-f.grPos)
if err != nil {
return 0, err
}

View File

@@ -7,7 +7,8 @@ package data
import (
"fmt"
"html/template"
"io/ioutil"
"io"
"os"
"time"
"github.com/spf13/pflag"
@@ -70,7 +71,7 @@ func GetTemplate(tmpl string) (tpl *template.Template, err error) {
defer fs.CheckClose(templateFile, &err)
templateBytes, err := ioutil.ReadAll(templateFile)
templateBytes, err := io.ReadAll(templateFile)
if err != nil {
return nil, fmt.Errorf("get template read: %w", err)
}
@@ -78,7 +79,7 @@ func GetTemplate(tmpl string) (tpl *template.Template, err error) {
templateString = string(templateBytes)
} else {
templateFile, err := ioutil.ReadFile(tmpl)
templateFile, err := os.ReadFile(tmpl)
if err != nil {
return nil, fmt.Errorf("get template open: %w", err)
}

View File

@@ -3,8 +3,9 @@ package http
import (
"context"
"flag"
"io/ioutil"
"io"
"net/http"
"os"
"strings"
"testing"
"time"
@@ -91,10 +92,10 @@ func TestInit(t *testing.T) {
func checkGolden(t *testing.T, fileName string, got []byte) {
if *updateGolden {
t.Logf("Updating golden file %q", fileName)
err := ioutil.WriteFile(fileName, got, 0666)
err := os.WriteFile(fileName, got, 0666)
require.NoError(t, err)
} else {
want, err := ioutil.ReadFile(fileName)
want, err := os.ReadFile(fileName)
require.NoError(t, err)
wants := strings.Split(string(want), "\n")
gots := strings.Split(string(got), "\n")
@@ -210,7 +211,7 @@ func TestGET(t *testing.T) {
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, test.Status, resp.StatusCode, test.Golden)
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
// Check we got a Last-Modified header and that it is a valid date

View File

@@ -10,10 +10,10 @@ import (
"encoding/base64"
"fmt"
"html/template"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"strings"
"time"
@@ -315,7 +315,7 @@ func NewServer(handler http.Handler, opt *Options) *Server {
log.Fatalf("Can't use --client-ca without --cert and --key")
}
certpool := x509.NewCertPool()
pem, err := ioutil.ReadFile(s.Opt.ClientCA)
pem, err := os.ReadFile(s.Opt.ClientCA)
if err != nil {
log.Fatalf("Failed to read client certificate authority: %v", err)
}

View File

@@ -295,7 +295,7 @@ func (s *Server) postObject(w http.ResponseWriter, r *http.Request, remote strin
}
}
o, err := operations.RcatSize(r.Context(), s.f, remote, r.Body, r.ContentLength, time.Now())
o, err := operations.RcatSize(r.Context(), s.f, remote, r.Body, r.ContentLength, time.Now(), nil)
if err != nil {
err = accounting.Stats(r.Context()).Error(err)
fs.Errorf(remote, "Post request rcat error: %v", err)

View File

@@ -17,7 +17,6 @@ import (
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
@@ -311,7 +310,7 @@ func (s *server) Close() {
}
func loadPrivateKey(keyPath string) (ssh.Signer, error) {
privateBytes, err := ioutil.ReadFile(keyPath)
privateBytes, err := os.ReadFile(keyPath)
if err != nil {
return nil, fmt.Errorf("failed to load private key: %w", err)
}
@@ -326,7 +325,7 @@ func loadPrivateKey(keyPath string) (ssh.Signer, error) {
// the public key of a received connection
// with the entries in the authorized_keys file.
func loadAuthorizedKeys(authorizedKeysPath string) (authorizedKeysMap map[string]struct{}, err error) {
authorizedKeysBytes, err := ioutil.ReadFile(authorizedKeysPath)
authorizedKeysBytes, err := os.ReadFile(authorizedKeysPath)
if err != nil {
return nil, fmt.Errorf("failed to load authorized keys: %w", err)
}
@@ -369,7 +368,7 @@ func makeRSASSHKeyPair(bits int, pubKeyPath, privateKeyPath string) (err error)
if err != nil {
return err
}
return ioutil.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0644)
return os.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0644)
}
// makeECDSASSHKeyPair make a pair of public and private keys for ECDSA SSH access.
@@ -401,7 +400,7 @@ func makeECDSASSHKeyPair(pubKeyPath, privateKeyPath string) (err error) {
if err != nil {
return err
}
return ioutil.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0644)
return os.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0644)
}
// makeEd25519SSHKeyPair make a pair of public and private keys for Ed25519 SSH access.
@@ -433,5 +432,5 @@ func makeEd25519SSHKeyPair(pubKeyPath, privateKeyPath string) (err error) {
if err != nil {
return err
}
return ioutil.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0644)
return os.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0644)
}

View File

@@ -11,7 +11,7 @@ package webdav
import (
"context"
"flag"
"io/ioutil"
"io"
"net/http"
"os"
"strings"
@@ -134,10 +134,10 @@ func TestHTTPFunction(t *testing.T) {
func checkGolden(t *testing.T, fileName string, got []byte) {
if *updateGolden {
t.Logf("Updating golden file %q", fileName)
err := ioutil.WriteFile(fileName, got, 0666)
err := os.WriteFile(fileName, got, 0666)
require.NoError(t, err)
} else {
want, err := ioutil.ReadFile(fileName)
want, err := os.ReadFile(fileName)
require.NoError(t, err, "problem")
wants := strings.Split(string(want), "\n")
gots := strings.Split(string(got), "\n")
@@ -253,7 +253,7 @@ func HelpTestGET(t *testing.T, testURL string) {
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
assert.Equal(t, test.Status, resp.StatusCode, test.Golden)
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
checkGolden(t, test.Golden, body)

View File

@@ -1,5 +1,5 @@
//go:build darwin
// +build darwin
//go:build darwin || freebsd || netbsd || dragonfly || openbsd
// +build darwin freebsd netbsd dragonfly openbsd
package cmd

View File

@@ -1,5 +1,5 @@
//go:build !darwin
// +build !darwin
//go:build !darwin && !freebsd && !netbsd && !dragonfly && !openbsd
// +build !darwin,!freebsd,!netbsd,!dragonfly,!openbsd
package cmd

View File

@@ -4,7 +4,7 @@ package version
import (
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
"strings"
"time"
@@ -95,7 +95,7 @@ func GetVersion(url string) (v *semver.Version, vs string, date time.Time, err e
if resp.StatusCode != http.StatusOK {
return v, vs, date, errors.New(resp.Status)
}
bodyBytes, err := ioutil.ReadAll(resp.Body)
bodyBytes, err := io.ReadAll(resp.Body)
if err != nil {
return v, vs, date, err
}

View File

@@ -1,7 +1,6 @@
package version
import (
"io/ioutil"
"os"
"runtime"
"testing"
@@ -13,7 +12,7 @@ import (
func TestVersionWorksWithoutAccessibleConfigFile(t *testing.T) {
// create temp config file
tempFile, err := ioutil.TempFile("", "unreadable_config.conf")
tempFile, err := os.CreateTemp("", "unreadable_config.conf")
assert.NoError(t, err)
path := tempFile.Name()
defer func() {

View File

@@ -6,7 +6,6 @@
package cmdtest
import (
"io/ioutil"
"log"
"os"
"os/exec"
@@ -121,7 +120,7 @@ func removeTestEnvironment(t *testing.T) {
// createTestFile creates the file testFolder/name
func createTestFile(name string, t *testing.T) string {
err := ioutil.WriteFile(testFolder+"/"+name, []byte("content_of_"+name), 0666)
err := os.WriteFile(testFolder+"/"+name, []byte("content_of_"+name), 0666)
require.NoError(t, err)
return testFolder + "/" + name
}

View File

@@ -650,3 +650,17 @@ put them back in again.` >}}
* Manoj Ghosh <manoj.ghosh@oracle.com>
* Tom Mombourquette <tom@devnode.com>
* Robert Newson <rnewson@apache.org>
* Samuel Johnson <esamueljohnson@gmail.com>
* coultonluke <luke@luke.org.uk>
* Anthony Pessy <anthony@cogniteev.com>
* Philip Harvey <pharvey@battelleecology.org>
* dgouju <dgouju@users.noreply.github.com>
* Clément Notin <clement.notin@gmail.com>
* x3-apptech <66947598+x3-apptech@users.noreply.github.com>
* Arnie97 <arnie97@gmail.com>
* Roel Arents <2691308+roelarents@users.noreply.github.com>
* Aaron Gokaslan <aaronGokaslan@gmail.com>
* techknowlogick <matti@mdranta.net>
* rkettelerij <richard@mindloops.nl>
* Kamui <fin-kamui@pm.me>
* asdffdsazqqq <90116442+asdffdsazqqq@users.noreply.github.com>

View File

@@ -164,7 +164,7 @@ Here are the Standard options specific to azureblob (Microsoft Azure Blob Storag
Storage Account Name.
Leave blank to use SAS URL or Emulator.
Leave blank to use SAS URL.
Properties:
@@ -198,7 +198,7 @@ Properties:
Storage Account Key.
Leave blank to use SAS URL or Emulator.
Leave blank to use SAS URL.
Properties:
@@ -246,6 +246,8 @@ Uses local storage emulator if provided as 'true'.
Leave blank if using real azure storage endpoint.
Provide the `account`, `key`, and `endpoint` options if you want to override their _azurite_ defaults.
Properties:
- Config: use_emulator
@@ -512,6 +514,18 @@ Properties:
{{< rem autogenerated options stop >}}
### Custom upload headers
You can set custom upload headers with the `--header-upload` flag.
- Cache-Control
- Content-Disposition
- Content-Encoding
- Content-Language
- Content-Type
Eg `--header-upload "Content-Type: text/potato"`
## Limitations
MD5 sums are only uploaded with chunked files if the source has an MD5
@@ -528,6 +542,6 @@ See [List of backends that do not support rclone about](https://rclone.org/overv
You can run rclone with the storage emulator (usually _azurite_).
To do this, just set up a new remote with `rclone config` following the instructions described in the introduction and set `use_emulator` config as `true`. You do not need to provide a default account name or an account key.
To do this, just set up a new remote with `rclone config` following the instructions described in the introduction and set `use_emulator` config as `true`. You do not need to provide a default account name or an account key, but you can override them in the _account_ and _key_ options. (Prior to v1.61 they were hard coded to _azurite_'s `devstoreaccount1`.)
Also, if you want to access a storage emulator instance running on a different machine, you can override _Endpoint_ parameter in advanced settings, setting it to `http(s)://<host>:<port>/devstoreaccount1` (e.g. `http://10.254.2.5:10000/devstoreaccount1`).

View File

@@ -11,7 +11,7 @@ Serve the remote for restic's REST API.
## Synopsis
Run a basic web server to serve a remove over restic's REST backend
Run a basic web server to serve a remote over restic's REST backend
API over HTTP. This allows restic to use rclone as a data storage
mechanism for cloud providers that restic does not support directly.

View File

@@ -85,7 +85,7 @@ script) from a URL which doesn't change then you can use these links.
| Intel/AMD - 32 Bit | {{< cdownload windows 386 >}} | - | {{< cdownload linux 386 >}} | {{< cdownload linux 386 deb >}} | {{< cdownload linux 386 rpm >}} | {{< cdownload freebsd 386 >}} | {{< cdownload netbsd 386 >}} | {{< cdownload openbsd 386 >}} | {{< cdownload plan9 386 >}} | - |
| ARMv6 - 32 Bit | - | - | {{< cdownload linux arm >}} | {{< cdownload linux arm deb >}} | {{< cdownload linux arm rpm >}} | {{< cdownload freebsd arm >}} | {{< cdownload netbsd arm >}} | - | - | - |
| ARMv7 - 32 Bit | - | - | {{< cdownload linux arm-v7 >}} | {{< cdownload linux arm-v7 deb >}} | {{< cdownload linux arm-v7 rpm >}} | {{< cdownload freebsd arm-v7 >}} | {{< cdownload netbsd arm-v7 >}} | - | - | - |
| ARM - 64 Bit | - | {{< cdownload osx arm64 >}} | {{< cdownload linux arm64 >}} | {{< cdownload linux arm64 deb >}} | {{< cdownload linux arm64 rpm >}} | - | - | - | - | - |
| ARM - 64 Bit | {{< cdownload windows arm64 >}} | {{< cdownload osx arm64 >}} | {{< cdownload linux arm64 >}} | {{< cdownload linux arm64 deb >}} | {{< cdownload linux arm64 rpm >}} | - | - | - | - | - |
| MIPS - Big Endian | - | - | {{< cdownload linux mips >}} | {{< cdownload linux mips deb >}} | {{< cdownload linux mips rpm >}} | - | - | - | - | - |
| MIPS - Little Endian | - | - | {{< cdownload linux mipsle >}} | {{< cdownload linux mipsle deb >}} | {{< cdownload linux mipsle rpm >}} | - | - | - | - | - |

View File

@@ -82,9 +82,8 @@ of metadata, which breaks the desired 1:1 mapping of files to objects.
### Can rclone do bi-directional sync? ###
No, not at present. rclone only does uni-directional sync from A ->
B. It may do in the future though since it has all the primitives - it
just requires writing the algorithm to do it.
Yes, since rclone v1.58.0, [bidirectional cloud sync](/bisync/) is
available.
### Can I use rclone with an HTTP proxy? ###
@@ -109,6 +108,14 @@ possibilities. So, on Linux, you may end up with code similar to
export HTTP_PROXY=$http_proxy
export HTTPS_PROXY=$http_proxy
Note: If the proxy server requires a username and password, then use
export http_proxy=http://username:password@proxyserver:12345
export https_proxy=$http_proxy
export HTTP_PROXY=$http_proxy
export HTTPS_PROXY=$http_proxy
The `NO_PROXY` allows you to disable the proxy for specific hosts.
Hosts must be comma separated, and can contain domains or parts.
For instance "foo.com" also matches "bar.foo.com".

View File

@@ -125,9 +125,9 @@ The simplest fix is to run
Fetch the correct binary for your processor type by clicking on these
links. If not sure, use the first link.
- [Intel/AMD - 64 Bit](https://downloads.rclone.org/rclone-current-linux-amd64.zip)
- [Intel/AMD - 32 Bit](https://downloads.rclone.org/rclone-current-linux-386.zip)
- [ARM - 64 Bit](https://downloads.rclone.org/rclone-current-linux-arm64.zip)
- [Intel/AMD - 64 Bit](https://downloads.rclone.org/rclone-current-windows-amd64.zip)
- [Intel/AMD - 32 Bit](https://downloads.rclone.org/rclone-current-windows-386.zip)
- [ARM - 64 Bit](https://downloads.rclone.org/rclone-current-windows-arm64.zip)
Open this file in the Explorer and extract `rclone.exe`. Rclone is a
portable executable so you can place it wherever is convenient.

View File

@@ -428,8 +428,8 @@ Properties:
Don't check to see if the files change during upload.
Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.
are being uploaded and aborts with a message which starts "can't copy -
source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this

View File

@@ -24,8 +24,21 @@ Currently it is recommended to disable 2FA on Mail.ru accounts intended for rclo
## Configuration
Here is an example of making a mailru configuration. First create a Mail.ru Cloud
account and choose a tariff, then run
Here is an example of making a mailru configuration.
First create a Mail.ru Cloud account and choose a tariff.
You will need to log in and create an app password for rclone. Rclone
**will not work** with your normal username and password - it will
give an error like `oauth2: server response missing access_token`.
- Click on your user icon in the top right
- Go to Security / "Пароль и безопасность"
- Click password for apps / "Пароли для внешних приложений"
- Add the password - give it a name - eg "rclone"
- Copy the password and use this password below - your normal login password won't work.
Now run
rclone config
@@ -51,6 +64,10 @@ User name (usually email)
Enter a string value. Press Enter for the default ("").
user> username@mail.ru
Password
This must be an app password - rclone will not work with your normal
password. See the Configuration section in the docs for how to make an
app password.
y) Yes type in my own password
g) Generate random password
y/g> y

View File

@@ -494,7 +494,7 @@ upon backend-specific capabilities.
| Microsoft OneDrive | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes |
| OpenStack Swift | Yes † | Yes | No | No | No | Yes | Yes | No | Yes | No |
| Oracle Object Storage | Yes | Yes | No | No | Yes | Yes | No | No | No | No |
| Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | No | No | No |
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
| premiumize.me | Yes | No | Yes | Yes | No | No | No | Yes | Yes | Yes |
| put.io | Yes | No | Yes | Yes | Yes | No | Yes | No | Yes | Yes |

View File

@@ -33,7 +33,6 @@
<p class="menu">
<i class="fa fa-comments fa-fw" aria-hidden="true"></i> <a href="https://forum.rclone.org">Rclone forum</a><br />
<i class="fab fa-github fa-fw" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone">GitHub project</a><br />
<i class="fab fa-slack fa-fw" aria-hidden="true"></i> <a href="https://slack-invite.rclone.org/">Rclone slack</a><br />
<i class="fa fa-book fa-fw" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone/wiki">Rclone Wiki</a><br />
<i class="fa fa-heart heart fa-fw" aria-hidden="true"></i> <a href="/donate/">Donate</a><br />
<i class="fab fa-twitter fa-fw" aria-hidden="true"></i> <a href="https://twitter.com/njcw">@njcw</a>

View File

@@ -1 +1 @@
v1.60.0
v1.61.0

View File

@@ -5,7 +5,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
"unicode/utf8"
@@ -29,7 +28,7 @@ var (
func TestNewAccountSizeName(t *testing.T) {
ctx := context.Background()
in := ioutil.NopCloser(bytes.NewBuffer([]byte{1}))
in := io.NopCloser(bytes.NewBuffer([]byte{1}))
stats := NewStats(ctx)
acc := newAccountSizeName(context.Background(), stats, in, 1, "test")
assert.Equal(t, in, acc.in)
@@ -44,7 +43,7 @@ func TestNewAccountSizeName(t *testing.T) {
func TestAccountWithBuffer(t *testing.T) {
ctx := context.Background()
in := ioutil.NopCloser(bytes.NewBuffer([]byte{1}))
in := io.NopCloser(bytes.NewBuffer([]byte{1}))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, -1, "test")
@@ -68,7 +67,7 @@ func TestAccountGetUpdateReader(t *testing.T) {
ctx := context.Background()
test := func(doClose bool) func(t *testing.T) {
return func(t *testing.T) {
in := ioutil.NopCloser(bytes.NewBuffer([]byte{1}))
in := io.NopCloser(bytes.NewBuffer([]byte{1}))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 1, "test")
@@ -80,7 +79,7 @@ func TestAccountGetUpdateReader(t *testing.T) {
require.NoError(t, acc.Close())
}
in2 := ioutil.NopCloser(bytes.NewBuffer([]byte{1}))
in2 := io.NopCloser(bytes.NewBuffer([]byte{1}))
acc.UpdateReader(ctx, in2)
assert.Equal(t, in2, acc.GetReader())
@@ -95,7 +94,7 @@ func TestAccountGetUpdateReader(t *testing.T) {
func TestAccountRead(t *testing.T) {
ctx := context.Background()
in := ioutil.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
in := io.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 1, "test")
@@ -137,7 +136,7 @@ func testAccountWriteTo(t *testing.T, withBuffer bool) {
for i := range buf {
buf[i] = byte(i % 251)
}
in := ioutil.NopCloser(bytes.NewBuffer(buf))
in := io.NopCloser(bytes.NewBuffer(buf))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, int64(len(buf)), "test")
if withBuffer {
@@ -178,7 +177,7 @@ func TestAccountWriteToWithBuffer(t *testing.T) {
func TestAccountString(t *testing.T) {
ctx := context.Background()
in := ioutil.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
in := io.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 3, "test")
@@ -199,13 +198,13 @@ func TestAccountString(t *testing.T) {
// Test the Accounter interface methods on Account and accountStream
func TestAccountAccounter(t *testing.T) {
ctx := context.Background()
in := ioutil.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
in := io.NopCloser(bytes.NewBuffer([]byte{1, 2, 3}))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 3, "test")
assert.True(t, in == acc.OldStream())
in2 := ioutil.NopCloser(bytes.NewBuffer([]byte{2, 3, 4}))
in2 := io.NopCloser(bytes.NewBuffer([]byte{2, 3, 4}))
acc.SetStream(in2)
assert.True(t, in2 == acc.OldStream())
@@ -228,7 +227,7 @@ func TestAccountAccounter(t *testing.T) {
assert.Equal(t, []byte{1, 2}, buf[:n])
// Test that we can get another accountstream out
in3 := ioutil.NopCloser(bytes.NewBuffer([]byte{3, 1, 2}))
in3 := io.NopCloser(bytes.NewBuffer([]byte{3, 1, 2}))
r2 := as.WrapStream(in3)
as2, ok := r2.(Accounter)
require.True(t, ok)
@@ -268,7 +267,7 @@ func TestAccountMaxTransfer(t *testing.T) {
ci.CutoffMode = oldMode
}()
in := ioutil.NopCloser(bytes.NewBuffer(make([]byte, 100)))
in := io.NopCloser(bytes.NewBuffer(make([]byte, 100)))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 1, "test")
@@ -312,7 +311,7 @@ func TestAccountMaxTransferWriteTo(t *testing.T) {
ci.CutoffMode = oldMode
}()
in := ioutil.NopCloser(readers.NewPatternReader(1024))
in := io.NopCloser(readers.NewPatternReader(1024))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 1, "test")
@@ -326,7 +325,7 @@ func TestAccountMaxTransferWriteTo(t *testing.T) {
func TestAccountReadCtx(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
in := ioutil.NopCloser(bytes.NewBuffer(make([]byte, 100)))
in := io.NopCloser(bytes.NewBuffer(make([]byte, 100)))
stats := NewStats(ctx)
acc := newAccountSizeName(ctx, stats, in, 1, "test")
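All of the `ioutil` hunks in this changeset are the same mechanical migration: `io/ioutil` was deprecated in Go 1.16 and its helpers were moved into `io` and `os`. A minimal, self-contained sketch of the mapping (the file name and payload below are illustrative, not taken from the changeset):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

func main() {
	// ioutil.NopCloser -> io.NopCloser: wrap a Reader with a no-op Close.
	rc := io.NopCloser(bytes.NewBufferString("payload"))

	// ioutil.ReadAll -> io.ReadAll: drain a reader into memory.
	data, err := io.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(data))

	// ioutil.TempFile -> os.CreateTemp (and likewise ioutil.ReadFile ->
	// os.ReadFile, ioutil.Discard -> io.Discard, as in the later hunks).
	f, err := os.CreateTemp("", "rclone-example")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	fmt.Println("temp file:", f.Name())
}
```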


@@ -675,7 +675,7 @@ func (s *StatsInfo) RetryAfter() time.Time {
}
// NewCheckingTransfer adds a checking transfer to the stats, from the object.
func (s *StatsInfo) NewCheckingTransfer(obj fs.Object) *Transfer {
func (s *StatsInfo) NewCheckingTransfer(obj fs.DirEntry) *Transfer {
tr := newCheckingTransfer(s, obj)
s.checking.add(tr)
return tr
@@ -697,7 +697,7 @@ func (s *StatsInfo) GetTransfers() int64 {
}
// NewTransfer adds a transfer to the stats from the object.
func (s *StatsInfo) NewTransfer(obj fs.Object) *Transfer {
func (s *StatsInfo) NewTransfer(obj fs.DirEntry) *Transfer {
tr := newTransfer(s, obj)
s.transferring.add(tr)
s.startAverageLoop()


@@ -63,12 +63,12 @@ type Transfer struct {
}
// newCheckingTransfer instantiates new checking of the object.
func newCheckingTransfer(stats *StatsInfo, obj fs.Object) *Transfer {
func newCheckingTransfer(stats *StatsInfo, obj fs.DirEntry) *Transfer {
return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), true)
}
// newTransfer instantiates new transfer.
func newTransfer(stats *StatsInfo, obj fs.Object) *Transfer {
func newTransfer(stats *StatsInfo, obj fs.DirEntry) *Transfer {
return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), false)
}
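
The two signature changes above widen the accepted type from `fs.Object` to `fs.DirEntry`. This works because `newTransferRemoteSize` only ever calls `Remote()` and `Size()`, both of which `fs.DirEntry` provides, so directories can now be accounted as transfers too. A compilable sketch of the pattern, using simplified stand-in interfaces rather than rclone's real definitions:

```go
package main

import "fmt"

// Simplified stand-ins for rclone's fs.DirEntry and fs.Object interfaces.
type DirEntry interface {
	Remote() string
	Size() int64
}

type Object interface {
	DirEntry
	// Real objects carry more methods (hashes, open, ...) that the
	// accounting layer never uses.
}

type Transfer struct {
	remote string
	size   int64
}

// Accepting the smaller DirEntry interface means anything that can report
// Remote and Size can be tracked, not just full Objects.
func newTransfer(e DirEntry) *Transfer {
	return &Transfer{remote: e.Remote(), size: e.Size()}
}

type dir string

func (d dir) Remote() string { return string(d) }
func (d dir) Size() int64    { return -1 } // directories have no meaningful size

func main() {
	tr := newTransfer(dir("backup/photos"))
	fmt.Println(tr.remote, tr.size)
}
```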


@@ -6,7 +6,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"math/rand"
"strings"
"sync"
@@ -23,7 +22,7 @@ import (
func TestAsyncReader(t *testing.T) {
ctx := context.Background()
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
buf := io.NopCloser(bytes.NewBufferString("Testbuffer"))
ar, err := New(ctx, buf, 4)
require.NoError(t, err)
@@ -48,7 +47,7 @@ func TestAsyncReader(t *testing.T) {
require.NoError(t, err)
// Test Close without reading everything
buf = ioutil.NopCloser(bytes.NewBuffer(make([]byte, 50000)))
buf = io.NopCloser(bytes.NewBuffer(make([]byte, 50000)))
ar, err = New(ctx, buf, 4)
require.NoError(t, err)
err = ar.Close()
@@ -59,7 +58,7 @@ func TestAsyncReader(t *testing.T) {
func TestAsyncWriteTo(t *testing.T) {
ctx := context.Background()
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
buf := io.NopCloser(bytes.NewBufferString("Testbuffer"))
ar, err := New(ctx, buf, 4)
require.NoError(t, err)
@@ -85,7 +84,7 @@ func TestAsyncReaderErrors(t *testing.T) {
require.Error(t, err)
// invalid buffer number
buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
buf := io.NopCloser(bytes.NewBufferString("Testbuffer"))
_, err = New(ctx, buf, 0)
require.Error(t, err)
_, err = New(ctx, buf, -1)
@@ -170,7 +169,7 @@ func TestAsyncReaderSizes(t *testing.T) {
bufsize := bufsizes[k]
read := readmaker.fn(strings.NewReader(text))
buf := bufio.NewReaderSize(read, bufsize)
ar, _ := New(ctx, ioutil.NopCloser(buf), l)
ar, _ := New(ctx, io.NopCloser(buf), l)
s := bufreader.fn(ar)
// "timeout" expects the Reader to recover, AsyncReader does not.
if s != text && readmaker.name != "timeout" {
@@ -211,7 +210,7 @@ func TestAsyncReaderWriteTo(t *testing.T) {
bufsize := bufsizes[k]
read := readmaker.fn(strings.NewReader(text))
buf := bufio.NewReaderSize(read, bufsize)
ar, _ := New(ctx, ioutil.NopCloser(buf), l)
ar, _ := New(ctx, io.NopCloser(buf), l)
dst := &bytes.Buffer{}
_, err := ar.WriteTo(dst)
if err != nil && err != io.EOF && err != iotest.ErrTimeout {
@@ -272,7 +271,7 @@ func testAsyncReaderClose(t *testing.T, writeto bool) {
close(started)
if writeto {
// exercise the WriteTo path
copyN, copyErr = a.WriteTo(ioutil.Discard)
copyN, copyErr = a.WriteTo(io.Discard)
} else {
// exercise the Read path
buf := make([]byte, 64*1024)
@@ -327,7 +326,7 @@ func TestAsyncReaderSkipBytes(t *testing.T) {
t.Run(fmt.Sprintf("%d", initialRead), func(t *testing.T) {
for _, skip := range skips {
t.Run(fmt.Sprintf("%d", skip), func(t *testing.T) {
ar, err := New(ctx, ioutil.NopCloser(bytes.NewReader(data)), buffers)
ar, err := New(ctx, io.NopCloser(bytes.NewReader(data)), buffers)
require.NoError(t, err)
wantSkipFalse := false


@@ -4,7 +4,6 @@ package configfile
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -113,7 +112,7 @@ func (s *Storage) Save() error {
if err != nil {
return fmt.Errorf("failed to create config directory: %w", err)
}
f, err := ioutil.TempFile(dir, name)
f, err := os.CreateTemp(dir, name)
if err != nil {
return fmt.Errorf("failed to create temp file for new config: %w", err)
}
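
Save writes the new config to a temp file created with `os.CreateTemp` in the target's own directory, which keeps it on the same filesystem; the usual completion of this pattern (not shown in this hunk) is a rename over the destination so readers never observe a half-written config. A minimal sketch of that write-temp-then-rename pattern, with `atomicWrite` as a hypothetical helper name:

```go
package main

import (
	"os"
	"path/filepath"
)

// atomicWrite is a hypothetical helper illustrating the pattern: write to a
// temp file beside the target, then rename it into place.
func atomicWrite(path string, data []byte) error {
	dir, name := filepath.Split(path)
	f, err := os.CreateTemp(dir, name) // same directory => same filesystem
	if err != nil {
		return err
	}
	tmp := f.Name()
	if _, err = f.Write(data); err != nil {
		f.Close()
		os.Remove(tmp)
		return err
	}
	if err = f.Close(); err != nil {
		os.Remove(tmp)
		return err
	}
	// Rename is atomic on POSIX filesystems when source and target
	// are on the same device.
	return os.Rename(tmp, path)
}

func main() {
	if err := atomicWrite("example.conf", []byte("[one]\nfruit = potato\n")); err != nil {
		panic(err)
	}
}
```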


@@ -2,7 +2,6 @@ package configfile
import (
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
@@ -30,7 +29,7 @@ fruit = banana
// Fill up a temporary config file with the testdata filename passed in
func setConfigFile(t *testing.T, data string) func() {
out, err := ioutil.TempFile("", "rclone-configfile-test")
out, err := os.CreateTemp("", "rclone-configfile-test")
require.NoError(t, err)
filePath := out.Name()
@@ -160,7 +159,7 @@ type = number3
`, toUnix(buf))
t.Run("Save", func(t *testing.T) {
require.NoError(t, data.Save())
buf, err := ioutil.ReadFile(config.GetConfigPath())
buf, err := os.ReadFile(config.GetConfigPath())
require.NoError(t, err)
assert.Equal(t, `[one]
fruit = potato


@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strings"
@@ -128,7 +127,7 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
// Encrypted content is base64 encoded.
dec := base64.NewDecoder(base64.StdEncoding, r)
box, err := ioutil.ReadAll(dec)
box, err := io.ReadAll(dec)
if err != nil {
return nil, fmt.Errorf("failed to load base64 encoded data: %w", err)
}
@@ -140,7 +139,7 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
for {
if envKeyFile := os.Getenv("_RCLONE_CONFIG_KEY_FILE"); len(envKeyFile) > 0 {
fs.Debugf(nil, "attempting to obtain configKey from temp file %s", envKeyFile)
obscuredKey, err := ioutil.ReadFile(envKeyFile)
obscuredKey, err := os.ReadFile(envKeyFile)
if err != nil {
errRemove := os.Remove(envKeyFile)
if errRemove != nil {
@@ -212,7 +211,7 @@ func Encrypt(src io.Reader, dst io.Writer) error {
var key [32]byte
copy(key[:], configKey[:32])
data, err := ioutil.ReadAll(src)
data, err := io.ReadAll(src)
if err != nil {
return err
}
@@ -256,7 +255,7 @@ func SetConfigPassword(password string) error {
}
configKey = sha.Sum(nil)
if PassConfigKeyForDaemonization {
tempFile, err := ioutil.TempFile("", "rclone")
tempFile, err := os.CreateTemp("", "rclone")
if err != nil {
return fmt.Errorf("cannot create temp file to store configKey: %w", err)
}
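
The surrounding code shows the shape of rclone's config encryption: the stored form is base64, and the key is 32 bytes derived by hashing the password. The sealing itself is done with NaCl secretbox (this is rclone's documented scheme, though the exact call sites are not in this hunk). A standalone sketch of sealing and opening with `golang.org/x/crypto/nacl/secretbox`:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte // in rclone this is derived from the config password
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}

	var nonce [24]byte // a fresh random nonce, prepended to the box
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err)
	}

	// Seal appends the encrypted message to nonce[:], so the nonce
	// travels with the ciphertext.
	box := secretbox.Seal(nonce[:], []byte("[remote]\ntype = s3\n"), &nonce, &key)

	// To open, split the nonce back off the front of the box.
	var openNonce [24]byte
	copy(openNonce[:], box[:24])
	plain, ok := secretbox.Open(nil, box[24:], &openNonce, &key)
	if !ok {
		panic("decryption failed")
	}
	fmt.Printf("%s", plain)
}
```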


@@ -146,18 +146,22 @@ func Float64VarP(flags *pflag.FlagSet, p *float64, name, shorthand string, value
// DurationP defines a flag which can be set by an environment variable
//
// It is a thin wrapper around pflag.DurationP
// It wraps the duration in an fs.Duration for extra suffixes and
// passes it to pflag.VarP
func DurationP(name, shorthand string, value time.Duration, usage string) (out *time.Duration) {
out = pflag.DurationP(name, shorthand, value, usage)
out = new(time.Duration)
*out = value
pflag.VarP((*fs.Duration)(out), name, shorthand, usage)
setValueFromEnv(pflag.CommandLine, name)
return out
}
// DurationVarP defines a flag which can be set by an environment variable
//
// It is a thin wrapper around pflag.DurationVarP
// It wraps the duration in an fs.Duration for extra suffixes and
// passes it to pflag.VarP
func DurationVarP(flags *pflag.FlagSet, p *time.Duration, name, shorthand string, value time.Duration, usage string) {
flags.DurationVarP(p, name, shorthand, value, usage)
flags.VarP((*fs.Duration)(p), name, shorthand, usage)
setValueFromEnv(flags, name)
}
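
Both wrappers switch from pflag's built-in duration handling to registering an `fs.Duration` via `pflag.VarP`. Any type implementing pflag's `Value` interface (`String`, `Set`, `Type`) can be registered this way, which is how the extra suffixes (for example a day suffix such as `1d`) get accepted. A sketch of the same pattern with a hypothetical `daysDuration` type standing in for `fs.Duration`:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/spf13/pflag"
)

// daysDuration is a hypothetical stand-in for rclone's fs.Duration: a
// time.Duration that additionally understands a "d" (days) suffix.
type daysDuration time.Duration

func (d *daysDuration) Type() string   { return "Duration" }
func (d *daysDuration) String() string { return time.Duration(*d).String() }

func (d *daysDuration) Set(s string) error {
	if strings.HasSuffix(s, "d") {
		days, err := strconv.ParseFloat(strings.TrimSuffix(s, "d"), 64)
		if err != nil {
			return err
		}
		*d = daysDuration(time.Duration(days * 24 * float64(time.Hour)))
		return nil
	}
	v, err := time.ParseDuration(s)
	*d = daysDuration(v)
	return err
}

func main() {
	out := new(time.Duration)
	*out = 30 * time.Second // the flag's default, as in DurationP above
	// Registering the wrapper type is what enables the extra suffixes.
	pflag.VarP((*daysDuration)(out), "max-age", "", "maximum age")
	pflag.Parse()
	fmt.Println(*out)
}
```

Running this with `--max-age 2d` would set the underlying `time.Duration` to 48h, which plain `time.ParseDuration` (and hence `pflag.DurationP`) rejects.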


@@ -7,7 +7,6 @@ package config_test
import (
"context"
"fmt"
"io/ioutil"
"os"
"testing"
@@ -37,7 +36,7 @@ func testConfigFile(t *testing.T, options []fs.Option, configFileName string) fu
_ = os.Unsetenv("_RCLONE_CONFIG_KEY_FILE")
_ = os.Unsetenv("RCLONE_CONFIG_PASS")
// create temp config file
tempFile, err := ioutil.TempFile("", configFileName)
tempFile, err := os.CreateTemp("", configFileName)
assert.NoError(t, err)
path := tempFile.Name()
assert.NoError(t, tempFile.Close())


@@ -3,7 +3,6 @@ package filter
import (
"context"
"fmt"
"io/ioutil"
"os"
"strings"
"sync"
@@ -30,7 +29,7 @@ func TestNewFilterDefault(t *testing.T) {
// testFile creates a temp file with the contents
func testFile(t *testing.T, contents string) string {
out, err := ioutil.TempFile("", "filter_test")
out, err := os.CreateTemp("", "filter_test")
require.NoError(t, err)
defer func() {
err := out.Close()

Some files were not shown because too many files have changed in this diff.