mirror of https://github.com/rclone/rclone.git
build: modernize Go usage
This commit modernizes Go usage. The changes were made by running:
go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...
Then the files needed to be `go fmt`ed and a few comments needed to be restored.
The modernizations include replacing
- if/else conditional assignment by a call to the built-in min or max functions added in go1.21
- sort.Slice(s, func(i, j int) bool { return s[i] < s[j] }) by a call to slices.Sort(s), added in go1.21
- interface{} by the 'any' type added in go1.18
- append([]T(nil), s...) by slices.Clone(s) or slices.Concat(s), added in go1.21
- loop around an m[k]=v map update by a call to one of the Collect, Copy, Clone, or Insert functions from the maps package, added in go1.21
- []byte(fmt.Sprintf...) by fmt.Appendf(nil, ...), added in go1.19
- append(s[:i], s[i+1:]...) by slices.Delete(s, i, i+1), added in go1.21
- a 3-clause for i := 0; i < n; i++ {} loop by for i := range n {}, added in go1.22
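To make the list concrete, here is a small self-contained sketch (illustrative only, not code from rclone) showing three of the rewrites: the min/max built-ins, ranging over an integer, and the any alias for interface{}:

    // Illustrative sketch only; none of these identifiers come from rclone.
    package main

    import "fmt"

    func main() {
        a, b := 3, 7

        // Before go1.21:
        //     largest := a
        //     if b > largest {
        //         largest = b
        //     }
        largest := max(a, b) // built-in max, go1.21

        // Before go1.22:
        //     for i := 0; i < 3; i++ { ... }
        for i := range 3 { // range over an int, go1.22
            fmt.Println("iteration", i)
        }

        // Before go1.18: var v interface{} = largest
        var v any = largest // 'any' is an alias for interface{}
        fmt.Println(v)
    }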
backend/cache/cache.go (vendored): 8 lines changed

@@ -1092,7 +1092,7 @@ func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) er
         return err
     }

-    for i := 0; i < len(entries); i++ {
+    for i := range entries {
         innerDir, ok := entries[i].(fs.Directory)
         if ok {
             err := f.recurse(ctx, innerDir.Remote(), list)
@@ -1428,7 +1428,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
     }()

     // wait until both are done
-    for c := 0; c < 2; c++ {
+    for range 2 {
         <-done
     }
 }
@@ -1753,7 +1753,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 }

 // Stats returns stats about the cache storage
-func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
+func (f *Fs) Stats() (map[string]map[string]any, error) {
     return f.cache.Stats()
 }

@@ -1933,7 +1933,7 @@ var commandHelp = []fs.CommandHelp{
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
     switch name {
     case "stats":
         return f.Stats()
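The cacheReader hunk keeps the existing pattern of waiting for two goroutines over a shared done channel; the loop simply no longer declares a counter it never used. A minimal standalone sketch of that pattern (names here are made up, this is not the rclone code):

    package main

    import "fmt"

    func main() {
        done := make(chan struct{})

        // Start two workers that signal completion on the same channel.
        for i := range 2 {
            go func(id int) {
                fmt.Println("worker", id, "finished")
                done <- struct{}{}
            }(i)
        }

        // Wait until both are done, mirroring the `for range 2 { <-done }` form.
        for range 2 {
            <-done
        }
    }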
backend/cache/cache_internal_test.go (vendored): 16 lines changed

@@ -360,7 +360,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
     require.NoError(t, err)
     require.Equal(t, int64(len(checkSample)), o.Size())

-    for i := 0; i < len(checkSample); i++ {
+    for i := range checkSample {
         require.Equal(t, testData[i], checkSample[i])
     }
 }
@@ -387,7 +387,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {

     readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
     require.NoError(t, err)
-    for i := 0; i < len(readData); i++ {
+    for i := range readData {
         require.Equalf(t, testData[i], readData[i], "at byte %v", i)
     }
 }
@@ -688,7 +688,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
     co, ok := o.(*cache.Object)
     require.True(t, ok)

-    for i := 0; i < 4; i++ { // read first 4
+    for i := range 4 { // read first 4
         _ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
     }
     cfs.CleanUpCache(true)
@@ -971,7 +971,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
     f, err := os.CreateTemp("", "rclonecache-tempfile")
     require.NoError(t, err)

-    for i := 0; i < int(cnt); i++ {
+    for range int(cnt) {
         data := randStringBytes(int(chunk))
         _, _ = f.Write(data)
     }
@@ -1085,9 +1085,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
     return err
 }

-func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
+func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
     var err error
-    var l []interface{}
+    var l []any
     var list fs.DirEntries
     list, err = f.List(context.Background(), remote)
     for _, ll := range list {
@@ -1215,7 +1215,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
     var err error
     var state cache.BackgroundUploadState

-    for i := 0; i < 2; i++ {
+    for range 2 {
         select {
         case state = <-buCh:
             // continue
@@ -1293,7 +1293,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str

 func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
     var err error
-    for i := 0; i < maxRetries; i++ {
+    for range maxRetries {
         err = block()
         if err == nil {
             return nil
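Note how the tool keeps the index variable where the loop body still needs it (for i := range checkSample) and drops it entirely where it does not (for range maxRetries). A small sketch of the two forms with made-up values:

    package main

    import "fmt"

    func main() {
        checkSample := []byte{1, 2, 3}

        // Index still needed: range over the slice's indices.
        for i := range checkSample {
            fmt.Printf("byte %d is %d\n", i, checkSample[i])
        }

        // Index unused: range over the count alone.
        maxRetries := 3
        for range maxRetries {
            fmt.Println("retrying")
        }
    }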
backend/cache/cache_upload_test.go (vendored): 2 lines changed

@@ -162,7 +162,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
     randInstance := rand.New(rand.NewSource(time.Now().Unix()))

     lastFile := ""
-    for i := 0; i < totalFiles; i++ {
+    for i := range totalFiles {
         size := int64(randInstance.Intn(maxSize-minSize) + minSize)
         testReader := runInstance.randomReader(t, size)
         remote := "test/" + strconv.Itoa(i) + ".bin"
backend/cache/handle.go (vendored): 4 lines changed

@@ -182,7 +182,7 @@ func (r *Handle) queueOffset(offset int64) {
         }
     }

-    for i := 0; i < r.workers; i++ {
+    for i := range r.workers {
         o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
         if o < 0 || o >= r.cachedObject.Size() {
             continue
@@ -222,7 +222,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
     if !found {
         // we're gonna give the workers a chance to pickup the chunk
         // and retry a couple of times
-        for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
+        for i := range r.cacheFs().opt.ReadRetries * 8 {
             data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
             if err == nil {
                 found = true
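One subtlety of the getChunk rewrite: with for i := range expr the bound expression is evaluated once before the loop begins, whereas the old three-clause form re-evaluated its condition on every iteration. That makes no difference here because the retry count does not change mid-loop, but the once-only evaluation is easy to demonstrate with a hypothetical bound() helper:

    package main

    import "fmt"

    // bound is a stand-in for an expression like opt.ReadRetries * 8.
    func bound() int {
        fmt.Println("bound evaluated")
        return 3
    }

    func main() {
        // The bound expression is evaluated exactly once, before iteration begins.
        for i := range bound() {
            fmt.Println("iteration", i)
        }
        // Output: "bound evaluated" printed once, then iterations 0, 1, 2.
    }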
backend/cache/plex.go (vendored): 8 lines changed

@@ -209,7 +209,7 @@ func (p *plexConnector) authenticate() error {
     if err != nil {
         return err
     }
-    var data map[string]interface{}
+    var data map[string]any
     err = json.NewDecoder(resp.Body).Decode(&data)
     if err != nil {
         return fmt.Errorf("failed to obtain token: %w", err)
@@ -273,11 +273,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
 }

 // adapted from: https://stackoverflow.com/a/28878037 (credit)
-func get(m interface{}, path ...interface{}) (interface{}, bool) {
+func get(m any, path ...any) (any, bool) {
     for _, p := range path {
         switch idx := p.(type) {
         case string:
-            if mm, ok := m.(map[string]interface{}); ok {
+            if mm, ok := m.(map[string]any); ok {
                 if val, found := mm[idx]; found {
                     m = val
                     continue
@@ -285,7 +285,7 @@ func get(m interface{}, path ...interface{}) (interface{}, bool) {
             }
             return nil, false
         case int:
-            if mm, ok := m.([]interface{}); ok {
+            if mm, ok := m.([]any); ok {
                 if len(mm) > idx {
                     m = mm[idx]
                     continue
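The get helper above walks a structure decoded from JSON into map[string]any and []any values, using string keys for objects and integer indexes for arrays. A self-contained sketch of the same idea follows; the JSON document and path are invented for illustration and are not Plex's actual response format:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // get walks nested maps and slices by string keys and int indexes.
    // It mirrors the shape of the helper in plex.go but is a standalone sketch.
    func get(m any, path ...any) (any, bool) {
        for _, p := range path {
            switch idx := p.(type) {
            case string:
                mm, ok := m.(map[string]any)
                if !ok {
                    return nil, false
                }
                val, found := mm[idx]
                if !found {
                    return nil, false
                }
                m = val
            case int:
                mm, ok := m.([]any)
                if !ok || idx >= len(mm) {
                    return nil, false
                }
                m = mm[idx]
            }
        }
        return m, true
    }

    func main() {
        var data map[string]any
        _ = json.Unmarshal([]byte(`{"items":[{"name":"first"}]}`), &data)

        if name, ok := get(data, "items", 0, "name"); ok {
            fmt.Println(name) // first
        }
    }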
backend/cache/storage_persistent.go (vendored): 8 lines changed

@@ -607,16 +607,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 }

 // Stats returns a go map with the stats key values
-func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
-    r := make(map[string]map[string]interface{})
-    r["data"] = make(map[string]interface{})
+func (b *Persistent) Stats() (map[string]map[string]any, error) {
+    r := make(map[string]map[string]any)
+    r["data"] = make(map[string]any)
     r["data"]["oldest-ts"] = time.Now()
     r["data"]["oldest-file"] = ""
     r["data"]["newest-ts"] = time.Now()
     r["data"]["newest-file"] = ""
     r["data"]["total-chunks"] = 0
     r["data"]["total-size"] = int64(0)
-    r["files"] = make(map[string]interface{})
+    r["files"] = make(map[string]any)
     r["files"]["oldest-ts"] = time.Now()
     r["files"]["oldest-name"] = ""
     r["files"]["newest-ts"] = time.Now()
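As the Command documentation in cache.go notes, results should be capable of being JSON encoded, and the nested map[string]map[string]any returned by Stats marshals directly. A minimal illustration with a few of the same keys (values made up):

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    func main() {
        // Made-up stats in the same shape as the Stats() return value.
        r := make(map[string]map[string]any)
        r["data"] = make(map[string]any)
        r["data"]["oldest-ts"] = time.Now()
        r["data"]["total-chunks"] = 0
        r["data"]["total-size"] = int64(0)

        out, err := json.MarshalIndent(r, "", "  ")
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }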