mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Move all backends into backend directory

This commit is contained in:
Nick Craig-Wood
2018-01-11 16:05:41 +00:00
parent 0a7731cf0d
commit b8b620f5c2
143 changed files with 88 additions and 87 deletions

1017
backend/cache/cache.go vendored Normal file

File diff suppressed because it is too large

758
backend/cache/cache_internal_test.go vendored Normal file

@@ -0,0 +1,758 @@
// +build !plan9,go1.7

package cache_test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math/rand"
"path"
"path/filepath"
"runtime"
"strconv"
"testing"
"time"
//"os"
"os/exec"
//"strings"
"github.com/ncw/rclone/backend/cache"
//"github.com/ncw/rclone/cmd/mount"
//_ "github.com/ncw/rclone/cmd/cmount"
//"github.com/ncw/rclone/cmd/mountlib"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest"
flag "github.com/spf13/pflag"
"github.com/stretchr/testify/require"
)
var (
infoAge = time.Second * 10
chunkClean = time.Second
okDiff = time.Second * 9 // really big diff here but the build machines seem to be slow. need a different way for this
workers = 2
)
func TestInternalListRootAndInnerRemotes(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "tilrair-local", "tilrair-cache", nil)
defer cleanupFs(t, rootFs, boltDb)
// Instantiate inner fs
innerFolder := "inner"
err := rootFs.Mkdir(innerFolder)
require.NoError(t, err)
innerFs, err := fs.NewFs("tilrair-cache:" + innerFolder)
require.NoError(t, err)
obj := writeObjectString(t, innerFs, "one", "content")
listRoot, err := rootFs.List("")
require.NoError(t, err)
listRootInner, err := rootFs.List(innerFolder)
require.NoError(t, err)
listInner, err := innerFs.List("")
require.NoError(t, err)
require.Lenf(t, listRoot, 1, "remote %v should have 1 entry", rootFs.Root())
require.Lenf(t, listRootInner, 1, "remote %v should have 1 entry in %v", rootFs.Root(), innerFolder)
require.Lenf(t, listInner, 1, "remote %v should have 1 entry", innerFs.Root())
err = obj.Remove()
require.NoError(t, err)
}
func TestInternalObjWrapFsFound(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "tiowff-local", "tiowff-cache", nil)
defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
wrappedFs := cfs.UnWrap()
data := "content"
writeObjectString(t, wrappedFs, "second", data)
listRoot, err := rootFs.List("")
require.NoError(t, err)
require.Lenf(t, listRoot, 1, "remote %v should have 1 entry", rootFs.Root())
co, err := rootFs.NewObject("second")
require.NoError(t, err)
r, err := co.Open()
require.NoError(t, err)
cachedData, err := ioutil.ReadAll(r)
require.NoError(t, err)
err = r.Close()
require.NoError(t, err)
strCached := string(cachedData)
require.Equal(t, data, strCached)
err = co.Remove()
require.NoError(t, err)
listRoot, err = wrappedFs.List("")
require.NoError(t, err)
require.Lenf(t, listRoot, 0, "remote %v should have 0 entries: %v", wrappedFs.Root(), listRoot)
}
func TestInternalObjNotFound(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "tionf-local", "tionf-cache", nil)
defer cleanupFs(t, rootFs, boltDb)
obj, err := rootFs.NewObject("404")
require.Error(t, err)
require.Nil(t, obj)
}
func TestInternalCachedWrittenContentMatches(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "ticwcm-local", "ticwcm-cache", nil)
defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
testData := make([]byte, (chunkSize*4 + chunkSize/2))
testSize, err := rand.Read(testData)
require.Equal(t, len(testData), testSize, "data size doesn't match")
require.NoError(t, err)
// write the object
o := writeObjectBytes(t, rootFs, "data.bin", testData)
require.Equal(t, o.Size(), int64(testSize))
// check sample of data from in-file
sampleStart := chunkSize / 2
sampleEnd := chunkSize
testSample := testData[sampleStart:sampleEnd]
checkSample := readDataFromObj(t, o, sampleStart, sampleEnd, false)
require.Equal(t, int64(len(checkSample)), sampleEnd-sampleStart)
require.Equal(t, checkSample, testSample)
}
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "ticucm-local", "ticucm-cache", nil)
defer cleanupFs(t, rootFs, boltDb)
// create some rand test data
testData1 := []byte(fstest.RandomString(100))
testData2 := []byte(fstest.RandomString(200))
// write the object
o := updateObjectBytes(t, rootFs, "data.bin", testData1, testData2)
require.Equal(t, o.Size(), int64(len(testData2)))
// check data from in-file
reader, err := o.Open()
require.NoError(t, err)
checkSample, err := ioutil.ReadAll(reader)
_ = reader.Close()
require.NoError(t, err)
require.Equal(t, checkSample, testData2)
}
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "tiwwcm-local", "tiwwcm-cache", nil)
defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
testData := make([]byte, (chunkSize*4 + chunkSize/2))
testSize, err := rand.Read(testData)
require.Equal(t, len(testData), testSize)
require.NoError(t, err)
// write the object
o := writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
require.Equal(t, o.Size(), int64(testSize))
o2, err := rootFs.NewObject("data.bin")
require.NoError(t, err)
require.Equal(t, o2.Size(), o.Size())
// check sample of data from in-file
sampleStart := chunkSize / 2
sampleEnd := chunkSize
testSample := testData[sampleStart:sampleEnd]
checkSample := readDataFromObj(t, o2, sampleStart, sampleEnd, false)
require.Equal(t, len(checkSample), len(testSample))
for i := 0; i < len(checkSample); i++ {
require.Equal(t, testSample[i], checkSample[i])
}
}
func TestInternalLargeWrittenContentMatches(t *testing.T) {
t.Skip("FIXME disabled because it is unreliable")
rootFs, boltDb := newLocalCacheFs(t, "tilwcm-local", "tilwcm-cache", nil)
defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
testData := make([]byte, (chunkSize*10 + chunkSize/2))
testSize, err := rand.Read(testData)
require.Equal(t, len(testData), testSize)
require.NoError(t, err)
// write the object
o := writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
require.Equal(t, o.Size(), int64(testSize))
o2, err := rootFs.NewObject("data.bin")
require.NoError(t, err)
require.Equal(t, o2.Size(), o.Size())
// check data from in-file
checkSample := readDataFromObj(t, o2, int64(0), int64(testSize), false)
require.Equal(t, len(checkSample), len(testData))
for i := 0; i < len(checkSample); i++ {
require.Equal(t, testData[i], checkSample[i], "byte: %d (%d), chunk: %d", int64(i)%chunkSize, i, int64(i)/chunkSize)
}
}
func TestInternalLargeWrittenContentMatches2(t *testing.T) {
cryptFs, boltDb := newLocalCacheCryptFs(t, "tilwcm2-local", "tilwcm2-cache", "tilwcm2-crypt", true, nil)
defer cleanupFs(t, cryptFs, boltDb)
cfs, err := getCacheFs(cryptFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
fileSize := 87197196
readOffset := 87195648
// create some rand test data
testData := make([]byte, fileSize)
testSize, err := rand.Read(testData)
require.Equal(t, len(testData), testSize)
require.NoError(t, err)
// write the object
o := writeObjectBytes(t, cryptFs, "data.bin", testData)
require.Equal(t, o.Size(), int64(testSize))
o2, err := cryptFs.NewObject("data.bin")
require.NoError(t, err)
require.Equal(t, o2.Size(), o.Size())
// check data from in-file
reader, err := o2.Open(&fs.SeekOption{Offset: int64(readOffset)})
require.NoError(t, err)
rs, ok := reader.(io.Seeker)
require.True(t, ok)
checkOffset, err := rs.Seek(int64(readOffset), 0)
require.NoError(t, err)
require.Equal(t, checkOffset, int64(readOffset))
checkSample, err := ioutil.ReadAll(reader)
require.NoError(t, err)
_ = reader.Close()
require.Equal(t, len(checkSample), fileSize-readOffset)
for i := 0; i < fileSize-readOffset; i++ {
require.Equal(t, testData[readOffset+i], checkSample[i], "byte: %d (%d), chunk: %d", int64(i)%chunkSize, i, int64(i)/chunkSize)
}
}
func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "tiwfcns-local", "tiwfcns-cache", nil)
defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))
// update in the wrapped fs
o, err := cfs.UnWrap().NewObject(co.Remote())
require.NoError(t, err)
err = o.SetModTime(co.ModTime().Truncate(time.Hour))
require.NoError(t, err)
// get a new instance from the cache
co2, err := rootFs.NewObject(o.Remote())
require.NoError(t, err)
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
require.NotEqual(t, o.ModTime().String(), co2.ModTime().String())
require.Equal(t, co.ModTime().String(), co2.ModTime().String())
}
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "ticsadcf-local", "ticsadcf-cache", nil)
defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))
// update in the wrapped fs
o, err := cfs.UnWrap().NewObject(co.Remote())
require.NoError(t, err)
err = o.SetModTime(co.ModTime().Add(-1 * time.Hour))
require.NoError(t, err)
// get a new instance from the cache
co2, err := rootFs.NewObject(o.Remote())
require.NoError(t, err)
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
require.NotEqual(t, o.ModTime().String(), co2.ModTime().String())
require.Equal(t, co.ModTime().String(), co2.ModTime().String())
cfs.DirCacheFlush() // flush the cache
l, err := cfs.UnWrap().List("")
require.NoError(t, err)
require.Len(t, l, 1)
o2 := l[0]
// get a new instance from the cache
co, err = rootFs.NewObject(o.Remote())
require.NoError(t, err)
require.Equal(t, o2.ModTime().String(), co.ModTime().String())
}
func TestInternalCacheWrites(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "ticw-local", "ticw-cache", map[string]string{"cache-writes": "true"})
defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))
expectedTs := time.Now()
ts, err := boltDb.GetChunkTs(path.Join(rootFs.Root(), co.Remote()), 0)
require.NoError(t, err)
require.WithinDuration(t, expectedTs, ts, okDiff)
}
func TestInternalMaxChunkSizeRespected(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "timcsr-local", "timcsr-cache", map[string]string{"cache-workers": "1"})
defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
totalChunks := 20
// create some rand test data
obj := writeObjectRandomBytes(t, cfs, (int64(totalChunks-1)*chunkSize + chunkSize/2))
o, err := rootFs.NewObject(obj.Remote())
require.NoError(t, err)
co, ok := o.(*cache.Object)
require.True(t, ok)
for i := 0; i < 4; i++ { // read first 4
_ = readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
}
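// newLocalCacheFs sets chunk_size to 1m and chunk_total_size to 2m above, so only two
// chunks fit in the persistent cache and cleanup is expected to keep the two most recently read ones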
cfs.CleanUpCache(true)
// the last 2 **must** be in the cache
require.True(t, boltDb.HasChunk(co, chunkSize*2))
require.True(t, boltDb.HasChunk(co, chunkSize*3))
for i := 4; i < 6; i++ { // read next 2
_ = readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
}
cfs.CleanUpCache(true)
// the last 2 **must** be in the cache
require.True(t, boltDb.HasChunk(co, chunkSize*4))
require.True(t, boltDb.HasChunk(co, chunkSize*5))
}
func TestInternalExpiredEntriesRemoved(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "tieer-local", "tieer-cache", map[string]string{"info_age": "5s"})
defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
// create some rand test data
_ = writeObjectString(t, cfs, "one", "one content")
err = cfs.Mkdir("test")
require.NoError(t, err)
_ = writeObjectString(t, cfs, "test/second", "second content")
l, err := cfs.List("test")
require.NoError(t, err)
require.Len(t, l, 1)
err = cfs.UnWrap().Mkdir("test/test2")
require.NoError(t, err)
l, err = cfs.List("test")
require.NoError(t, err)
require.Len(t, l, 1)
waitTime := time.Second * 5
t.Logf("Waiting %v seconds for cache to expire\n", waitTime)
time.Sleep(waitTime)
l, err = cfs.List("test")
require.NoError(t, err)
require.Len(t, l, 2)
}
// FIXME, enable this when mount is sorted out
//func TestInternalFilesMissingInMount1904(t *testing.T) {
// t.Skip("Not yet")
// if runtime.GOOS == "windows" {
// t.Skip("Not yet")
// }
// id := "tifm1904"
// rootFs, _ := newLocalCacheCryptFs(t, "test-local", "test-cache", "test-crypt", false,
// map[string]string{"chunk_size": "5M", "info_age": "1m", "chunk_total_size": "500M", "cache-writes": "true"})
// mntPoint := path.Join("/tmp", "tifm1904-mnt")
// testPoint := path.Join(mntPoint, id)
// checkOutput := "1 10 100 11 12 13 14 15 16 17 18 19 2 20 21 22 23 24 25 26 27 28 29 3 30 31 32 33 34 35 36 37 38 39 4 40 41 42 43 44 45 46 47 48 49 5 50 51 52 53 54 55 56 57 58 59 6 60 61 62 63 64 65 66 67 68 69 7 70 71 72 73 74 75 76 77 78 79 8 80 81 82 83 84 85 86 87 88 89 9 90 91 92 93 94 95 96 97 98 99 "
//
// _ = os.MkdirAll(mntPoint, os.ModePerm)
//
// list, err := rootFs.List("")
// require.NoError(t, err)
// found := false
// list.ForDir(func(d fs.Directory) {
// if strings.Contains(d.Remote(), id) {
// found = true
// }
// })
//
// if !found {
// t.Skip("Test folder '%v' doesn't exist", id)
// }
//
// mountFs(t, rootFs, mntPoint)
// defer unmountFs(t, mntPoint)
//
// for i := 1; i <= 2; i++ {
// out, err := exec.Command("ls", testPoint).Output()
// require.NoError(t, err)
// require.Equal(t, checkOutput, strings.Replace(string(out), "\n", " ", -1))
// t.Logf("root path has all files")
// _ = writeObjectString(t, rootFs, path.Join(id, strconv.Itoa(i), strconv.Itoa(i), "one_file"), "one content")
//
// for j := 1; j <= 100; j++ {
// out, err := exec.Command("ls", path.Join(testPoint, strconv.Itoa(j))).Output()
// require.NoError(t, err)
// require.Equal(t, checkOutput, strings.Replace(string(out), "\n", " ", -1), "'%v' doesn't match", j)
// }
// obj, err := rootFs.NewObject(path.Join(id, strconv.Itoa(i), strconv.Itoa(i), "one_file"))
// require.NoError(t, err)
// err = obj.Remove()
// require.NoError(t, err)
// t.Logf("folders contain all the files")
//
// out, err = exec.Command("date").Output()
// require.NoError(t, err)
// t.Logf("check #%v date: '%v'", i, strings.Replace(string(out), "\n", " ", -1))
//
// if i < 2 {
// time.Sleep(time.Second * 60)
// }
// }
//}
func writeObjectRandomBytes(t *testing.T, f fs.Fs, size int64) fs.Object {
remote := strconv.Itoa(rand.Int()) + ".bin"
// create some rand test data
testData := make([]byte, size)
testSize, err := rand.Read(testData)
require.Equal(t, size, int64(len(testData)))
require.Equal(t, size, int64(testSize))
require.NoError(t, err)
o := writeObjectBytes(t, f, remote, testData)
require.Equal(t, size, o.Size())
return o
}
func writeObjectString(t *testing.T, f fs.Fs, remote, content string) fs.Object {
return writeObjectBytes(t, f, remote, []byte(content))
}
func writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
in := bytes.NewReader(data)
modTime := time.Now()
objInfo := fs.NewStaticObjectInfo(remote, modTime, int64(len(data)), true, nil, f)
obj, err := f.Put(in, objInfo)
require.NoError(t, err)
return obj
}
func updateObjectBytes(t *testing.T, f fs.Fs, remote string, data1 []byte, data2 []byte) fs.Object {
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := fs.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := fs.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
obj, err := f.Put(in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(remote)
require.NoError(t, err)
err = obj.Update(in2, objInfo2)
return obj
}
func readDataFromObj(t *testing.T, co fs.Object, offset, end int64, useSeek bool) []byte {
var reader io.ReadCloser
var err error
size := end - offset
checkSample := make([]byte, size)
reader, err = co.Open(&fs.SeekOption{Offset: offset})
require.NoError(t, err)
totalRead, err := io.ReadFull(reader, checkSample)
require.NoError(t, err)
_ = reader.Close()
require.Equal(t, int64(totalRead), size, "wrong data read size from file")
return checkSample
}
func cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
err := f.Features().Purge()
require.NoError(t, err)
b.Close()
}
func newLocalCacheCryptFs(t *testing.T, localRemote, cacheRemote, cryptRemote string, purge bool, cfg map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
dbPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote+".db")
chunkPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote)
boltDb, err := cache.GetPersistent(dbPath, chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
localExists := false
cacheExists := false
cryptExists := false
for _, s := range fs.ConfigFileSections() {
if s == localRemote {
localExists = true
}
if s == cacheRemote {
cacheExists = true
}
if s == cryptRemote {
cryptExists = true
}
}
localRemoteWrap := ""
if !localExists {
localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
fs.ConfigFileSet(localRemote, "type", "local")
fs.ConfigFileSet(localRemote, "nounc", "true")
}
if !cacheExists {
fs.ConfigFileSet(cacheRemote, "type", "cache")
fs.ConfigFileSet(cacheRemote, "remote", localRemoteWrap)
}
if c, ok := cfg["chunk_size"]; ok {
fs.ConfigFileSet(cacheRemote, "chunk_size", c)
} else {
fs.ConfigFileSet(cacheRemote, "chunk_size", "1m")
}
if c, ok := cfg["chunk_total_size"]; ok {
fs.ConfigFileSet(cacheRemote, "chunk_total_size", c)
} else {
fs.ConfigFileSet(cacheRemote, "chunk_total_size", "2m")
}
if c, ok := cfg["info_age"]; ok {
fs.ConfigFileSet(cacheRemote, "info_age", c)
} else {
fs.ConfigFileSet(cacheRemote, "info_age", infoAge.String())
}
if !cryptExists {
t.Skipf("Skipping due to missing crypt remote: %v", cryptRemote)
}
if c, ok := cfg["cache-chunk-no-memory"]; ok {
_ = flag.Set("cache-chunk-no-memory", c)
} else {
_ = flag.Set("cache-chunk-no-memory", "true")
}
if c, ok := cfg["cache-workers"]; ok {
_ = flag.Set("cache-workers", c)
} else {
_ = flag.Set("cache-workers", strconv.Itoa(workers))
}
if c, ok := cfg["cache-chunk-clean-interval"]; ok {
_ = flag.Set("cache-chunk-clean-interval", c)
} else {
_ = flag.Set("cache-chunk-clean-interval", chunkClean.String())
}
if c, ok := cfg["cache-writes"]; ok {
_ = flag.Set("cache-writes", c)
} else {
_ = flag.Set("cache-writes", strconv.FormatBool(cache.DefCacheWrites))
}
// Instantiate root
f, err := fs.NewFs(cryptRemote + ":")
require.NoError(t, err)
if purge {
_ = f.Features().Purge()
require.NoError(t, err)
}
err = f.Mkdir("")
require.NoError(t, err)
return f, boltDb
}
func newLocalCacheFs(t *testing.T, localRemote, cacheRemote string, cfg map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
dbPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote+".db")
chunkPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote)
boltDb, err := cache.GetPersistent(dbPath, chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
localExists := false
cacheExists := false
for _, s := range fs.ConfigFileSections() {
if s == localRemote {
localExists = true
}
if s == cacheRemote {
cacheExists = true
}
}
localRemoteWrap := ""
if !localExists {
localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
fs.ConfigFileSet(localRemote, "type", "local")
fs.ConfigFileSet(localRemote, "nounc", "true")
}
if !cacheExists {
fs.ConfigFileSet(cacheRemote, "type", "cache")
fs.ConfigFileSet(cacheRemote, "remote", localRemoteWrap)
}
if c, ok := cfg["chunk_size"]; ok {
fs.ConfigFileSet(cacheRemote, "chunk_size", c)
} else {
fs.ConfigFileSet(cacheRemote, "chunk_size", "1m")
}
if c, ok := cfg["chunk_total_size"]; ok {
fs.ConfigFileSet(cacheRemote, "chunk_total_size", c)
} else {
fs.ConfigFileSet(cacheRemote, "chunk_total_size", "2m")
}
if c, ok := cfg["info_age"]; ok {
fs.ConfigFileSet(cacheRemote, "info_age", c)
} else {
fs.ConfigFileSet(cacheRemote, "info_age", infoAge.String())
}
if c, ok := cfg["cache-chunk-no-memory"]; ok {
_ = flag.Set("cache-chunk-no-memory", c)
} else {
_ = flag.Set("cache-chunk-no-memory", "true")
}
if c, ok := cfg["cache-workers"]; ok {
_ = flag.Set("cache-workers", c)
} else {
_ = flag.Set("cache-workers", strconv.Itoa(workers))
}
if c, ok := cfg["cache-chunk-clean-interval"]; ok {
_ = flag.Set("cache-chunk-clean-interval", c)
} else {
_ = flag.Set("cache-chunk-clean-interval", chunkClean.String())
}
if c, ok := cfg["cache-writes"]; ok {
_ = flag.Set("cache-writes", c)
} else {
_ = flag.Set("cache-writes", strconv.FormatBool(cache.DefCacheWrites))
}
// Instantiate root
f, err := fs.NewFs(cacheRemote + ":")
require.NoError(t, err)
_ = f.Features().Purge()
require.NoError(t, err)
err = f.Mkdir("")
require.NoError(t, err)
return f, boltDb
}
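// For reference, with the defaults above and the example remote names "tilrair-local" and
// "tilrair-cache" used by the first test, this helper ends up with configuration roughly
// equivalent to:
//
//   [tilrair-local]
//   type = local
//   nounc = true
//
//   [tilrair-cache]
//   type = cache
//   remote = tilrair-local:/var/tmp/tilrair-local
//   chunk_size = 1m
//   chunk_total_size = 2m
//   info_age = 10s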
//func mountFs(t *testing.T, f fs.Fs, mntPoint string) {
// if runtime.GOOS == "windows" {
// t.Skip("Skipping test cause on windows")
// return
// }
//
// _ = flag.Set("debug-fuse", "false")
//
// go func() {
// mountlib.DebugFUSE = false
// mountlib.AllowOther = true
// mount.Mount(f, mntPoint)
// }()
//
// time.Sleep(time.Second * 3)
//}
func unmountFs(t *testing.T, mntPoint string) {
var out []byte
var err error
if runtime.GOOS == "windows" {
t.Skip("Skipping test cause on windows")
return
} else if runtime.GOOS == "linux" {
out, err = exec.Command("fusermount", "-u", mntPoint).Output()
} else if runtime.GOOS == "darwin" {
out, err = exec.Command("diskutil", "unmount", mntPoint).Output()
}
t.Logf("Unmount output: %v", string(out))
require.NoError(t, err)
}
func getCacheFs(f fs.Fs) (*cache.Fs, error) {
cfs, ok := f.(*cache.Fs)
if ok {
return cfs, nil
} else {
if f.Features().UnWrap != nil {
cfs, ok := f.Features().UnWrap().(*cache.Fs)
if ok {
return cfs, nil
}
}
}
return nil, fmt.Errorf("didn't found a cache fs")
}
var (
_ fs.Fs = (*cache.Fs)(nil)
_ fs.Fs = (*local.Fs)(nil)
)

77
backend/cache/cache_test.go vendored Normal file

@@ -0,0 +1,77 @@
// Test Cache filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
// +build !plan9,go1.7

package cache_test
import (
"testing"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func TestSetup(t *testing.T) {
fstests.NilObject = fs.Object((*cache.Object)(nil))
fstests.RemoteName = "TestCache:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

6
backend/cache/cache_unsupported.go vendored Normal file

@@ -0,0 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 !go1.7

package cache

138
backend/cache/directory.go vendored Normal file

@@ -0,0 +1,138 @@
// +build !plan9,go1.7

package cache
import (
"time"
"path"
"github.com/ncw/rclone/fs"
)
// Directory is a generic dir that stores basic information about it
type Directory struct {
fs.Directory `json:"-"`
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the directory
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheItems int64 `json:"items"` // number of objects or -1 for unknown
CacheType string `json:"cacheType"` // object type
CacheTs *time.Time `json:",omitempty"`
}
// NewDirectory builds an empty dir which will be used to unmarshal data in it
func NewDirectory(f *Fs, remote string) *Directory {
cd := ShallowDirectory(f, remote)
t := time.Now()
cd.CacheTs = &t
return cd
}
// ShallowDirectory builds an empty dir which will be used to unmarshal data in it
func ShallowDirectory(f *Fs, remote string) *Directory {
var cd *Directory
fullRemote := cleanPath(path.Join(f.Root(), remote))
// build a new one
dir := cleanPath(path.Dir(fullRemote))
name := cleanPath(path.Base(fullRemote))
cd = &Directory{
CacheFs: f,
Name: name,
Dir: dir,
CacheModTime: time.Now().UnixNano(),
CacheSize: 0,
CacheItems: 0,
CacheType: "Directory",
}
return cd
}
// DirectoryFromOriginal builds one from a generic fs.Directory
func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
var cd *Directory
fullRemote := path.Join(f.Root(), d.Remote())
dir := cleanPath(path.Dir(fullRemote))
name := cleanPath(path.Base(fullRemote))
t := time.Now()
cd = &Directory{
Directory: d,
CacheFs: f,
Name: name,
Dir: dir,
CacheModTime: d.ModTime().UnixNano(),
CacheSize: d.Size(),
CacheItems: d.Items(),
CacheType: "Directory",
CacheTs: &t,
}
return cd
}
// Fs returns its FS info
func (d *Directory) Fs() fs.Info {
return d.CacheFs
}
// String returns a human friendly name for this object
func (d *Directory) String() string {
if d == nil {
return "<nil>"
}
return d.Remote()
}
// Remote returns the remote path
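// For example (illustrative names), with a cache root of "media" and a cached absolute path
// of "media/movies/a.mkv", the remote returned is "movies/a.mkv"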
func (d *Directory) Remote() string {
p := cleanPath(path.Join(d.Dir, d.Name))
if d.CacheFs.Root() != "" {
p = p[len(d.CacheFs.Root()):] // trim out root
if len(p) > 0 { // remove first separator
p = p[1:]
}
}
return p
}
// abs returns the absolute path to the dir
func (d *Directory) abs() string {
return cleanPath(path.Join(d.Dir, d.Name))
}
// parentRemote returns the absolute path parent remote
func (d *Directory) parentRemote() string {
absPath := d.abs()
if absPath == "" {
return ""
}
return cleanPath(path.Dir(absPath))
}
// ModTime returns the cached ModTime
func (d *Directory) ModTime() time.Time {
return time.Unix(0, d.CacheModTime)
}
// Size returns the cached Size
func (d *Directory) Size() int64 {
return d.CacheSize
}
// Items returns the cached Items
func (d *Directory) Items() int64 {
return d.CacheItems
}
var (
_ fs.Directory = (*Directory)(nil)
)

506
backend/cache/handle.go vendored Normal file

@@ -0,0 +1,506 @@
// +build !plan9,go1.7

package cache
import (
"fmt"
"io"
"os"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// Handle is managing the read/write/seek operations on an open handle
type Handle struct {
cachedObject *Object
memory ChunkStorage
preloadQueue chan int64
preloadOffset int64
offset int64
seenOffsets map[int64]bool
mu sync.Mutex
confirmReading chan bool
UseMemory bool
workers []*worker
closed bool
reading bool
}
// NewObjectHandle returns a new Handle for an existing Object
func NewObjectHandle(o *Object) *Handle {
r := &Handle{
cachedObject: o,
offset: 0,
preloadOffset: -1, // -1 to trigger the first preload
UseMemory: o.CacheFs.chunkMemory,
reading: false,
}
r.seenOffsets = make(map[int64]bool)
r.memory = NewMemory(-1)
// create a larger buffer to queue up requests
r.preloadQueue = make(chan int64, o.CacheFs.totalWorkers*10)
r.confirmReading = make(chan bool)
r.startReadWorkers()
return r
}
// cacheFs is a convenience method to get the parent cache FS of the object's manager
func (r *Handle) cacheFs() *Fs {
return r.cachedObject.CacheFs
}
// storage is a convenience method to get the persistent storage of the object's manager
func (r *Handle) storage() Storage {
return r.cacheFs().cache
}
// String representation of this reader
func (r *Handle) String() string {
return r.cachedObject.abs()
}
// startReadWorkers will start the worker pool
func (r *Handle) startReadWorkers() {
if r.hasAtLeastOneWorker() {
return
}
totalWorkers := r.cacheFs().totalWorkers
if r.cacheFs().plexConnector.isConfigured() {
if !r.cacheFs().plexConnector.isConnected() {
err := r.cacheFs().plexConnector.authenticate()
if err != nil {
fs.Infof(r, "failed to authenticate to Plex: %v", err)
}
}
if r.cacheFs().plexConnector.isConnected() {
totalWorkers = 1
}
}
r.scaleWorkers(totalWorkers)
}
// scaleWorkers will scale the worker pool up or down to the desired number of workers
func (r *Handle) scaleWorkers(desired int) {
current := len(r.workers)
if current == desired {
return
}
if current > desired {
// scale in gracefully
for i := 0; i < current-desired; i++ {
r.preloadQueue <- -1
}
} else {
// scale out
for i := 0; i < desired-current; i++ {
w := &worker{
r: r,
ch: r.preloadQueue,
id: current + i,
}
go w.run()
r.workers = append(r.workers, w)
}
}
// ignore first scale out from 0
if current != 0 {
fs.Infof(r, "scale workers to %v", desired)
}
}
func (r *Handle) requestExternalConfirmation() {
// if there's no external confirmation available
// then we skip this step
if len(r.workers) >= r.cacheFs().totalMaxWorkers ||
!r.cacheFs().plexConnector.isConnected() {
return
}
go r.cacheFs().plexConnector.isPlayingAsync(r.cachedObject, r.confirmReading)
}
func (r *Handle) confirmExternalReading() {
// if we have a max value of workers
// or there's no external confirmation available
// then we skip this step
if len(r.workers) >= r.cacheFs().totalMaxWorkers ||
!r.cacheFs().plexConnector.isConnected() {
return
}
select {
case confirmed := <-r.confirmReading:
if !confirmed {
return
}
default:
return
}
fs.Infof(r, "confirmed reading by external reader")
r.scaleWorkers(r.cacheFs().totalMaxWorkers)
}
// queueOffset will send an offset to the workers if it's different from the last one
func (r *Handle) queueOffset(offset int64) {
if offset != r.preloadOffset {
// clean past in-memory chunks
if r.UseMemory {
go r.memory.CleanChunksByNeed(offset)
}
go r.cacheFs().CleanUpCache(false)
r.confirmExternalReading()
r.preloadOffset = offset
// clear the past seen chunks
// they will remain in our persistent storage but will be removed from transient
// so they need to be picked up by a worker
for k := range r.seenOffsets {
if k < offset {
r.seenOffsets[k] = false
}
}
for i := 0; i < len(r.workers); i++ {
o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
}
if v, ok := r.seenOffsets[o]; ok && v {
continue
}
r.seenOffsets[o] = true
r.preloadQueue <- o
}
r.requestExternalConfirmation()
}
}
func (r *Handle) hasAtLeastOneWorker() bool {
oneWorker := false
for i := 0; i < len(r.workers); i++ {
if r.workers[i].isRunning() {
oneWorker = true
}
}
return oneWorker
}
// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
// it can be from transient or persistent cache
// it aligns the requested start to the cache's chunk boundaries and returns the data from the requested offset onwards
func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
var data []byte
var err error
// we calculate the modulus of the requested offset with the size of a chunk
offset := chunkStart % r.cacheFs().chunkSize
// we align the start offset of the first chunk to a likely chunk in the storage
chunkStart = chunkStart - offset
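// e.g. with a 1 MiB chunk size (illustrative), a request at chunkStart = 1049600 (1 MiB + 1024)
// gives offset = 1024 and an aligned chunkStart of 1048576, so the stored chunk starting at
// 1 MiB is fetched and the first 1024 bytes are trimmed off below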
r.queueOffset(chunkStart)
found := false
if r.UseMemory {
data, err = r.memory.GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
}
}
if !found {
// give the workers a chance to pick up the chunk
// and retry a couple of times
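// (readRetries*2 attempts, roughly 500ms apart, so about readRetries seconds in total)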
for i := 0; i < r.cacheFs().readRetries*2; i++ {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
break
}
fs.Debugf(r, "%v: chunk retry storage: %v", chunkStart, i)
time.Sleep(time.Millisecond * 500)
}
}
// not found in ram or
// the worker didn't manage to download the chunk in time so we abort and close the stream
if err != nil || len(data) == 0 || !found {
if !r.hasAtLeastOneWorker() {
fs.Errorf(r, "out of workers")
return nil, io.ErrUnexpectedEOF
}
return nil, errors.Errorf("chunk not found %v", chunkStart)
}
// first chunk will be aligned with the start
if offset > 0 {
if offset >= int64(len(data)) {
fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size())
return nil, io.ErrUnexpectedEOF
}
data = data[int(offset):]
}
return data, nil
}
// Read copies up to len(p) bytes of the current chunk into p and advances the offset
func (r *Handle) Read(p []byte) (n int, err error) {
r.mu.Lock()
defer r.mu.Unlock()
var buf []byte
// first reading
if !r.reading {
r.reading = true
r.requestExternalConfirmation()
}
// reached EOF
if r.offset >= r.cachedObject.Size() {
return 0, io.EOF
}
currentOffset := r.offset
buf, err = r.getChunk(currentOffset)
if err != nil && len(buf) == 0 {
fs.Errorf(r, "(%v/%v) error (%v) response", currentOffset, r.cachedObject.Size(), err)
return 0, io.EOF
}
readSize := copy(p, buf)
newOffset := currentOffset + int64(readSize)
r.offset = newOffset
return readSize, err
}
// Close will tell the workers to stop
func (r *Handle) Close() error {
r.mu.Lock()
defer r.mu.Unlock()
if r.closed {
return errors.New("file already closed")
}
close(r.preloadQueue)
r.closed = true
// wait for workers to complete their jobs before returning
waitCount := 3
for i := 0; i < len(r.workers); i++ {
waitIdx := 0
for r.workers[i].isRunning() && waitIdx < waitCount {
time.Sleep(time.Second)
waitIdx++
}
}
go r.cacheFs().CleanUpCache(false)
fs.Debugf(r, "cache reader closed %v", r.offset)
return nil
}
// Seek will move the current offset based on whence and instruct the workers to move there too
func (r *Handle) Seek(offset int64, whence int) (int64, error) {
r.mu.Lock()
defer r.mu.Unlock()
var err error
switch whence {
case os.SEEK_SET:
fs.Debugf(r, "moving offset set from %v to %v", r.offset, offset)
r.offset = offset
case os.SEEK_CUR:
fs.Debugf(r, "moving offset cur from %v to %v", r.offset, r.offset+offset)
r.offset += offset
case os.SEEK_END:
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
r.offset = r.cachedObject.Size() + offset
default:
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
}
chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize)
if chunkStart >= r.cacheFs().chunkSize {
chunkStart = chunkStart - r.cacheFs().chunkSize
}
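// e.g. with a 1 MiB chunk size (illustrative), seeking to 2.5 MiB aligns chunkStart to 2 MiB and
// then steps back one chunk to 1 MiB, so preloading is queued from the chunk before the new offset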
r.queueOffset(chunkStart)
return r.offset, err
}
type worker struct {
r *Handle
ch <-chan int64
rc io.ReadCloser
id int
running bool
mu sync.Mutex
}
// String is a representation of this worker
func (w *worker) String() string {
return fmt.Sprintf("worker-%v <%v>", w.id, w.r.cachedObject.Name)
}
// reader will return a reader depending on the capabilities of the source reader:
// - if it supports seeking it will seek to the desired offset and return the same reader
// - if it doesn't support seeking it will close a possible existing one and open at the desired offset
// - if there's no reader associated with this worker, it will create one
func (w *worker) reader(offset, end int64) (io.ReadCloser, error) {
var err error
r := w.rc
if w.rc == nil {
r, err = w.r.cacheFs().OpenRateLimited(func() (io.ReadCloser, error) {
return w.r.cachedObject.Object.Open(&fs.SeekOption{Offset: offset}, &fs.RangeOption{Start: offset, End: end})
})
if err != nil {
return nil, err
}
return r, nil
}
seekerObj, ok := r.(io.Seeker)
if ok {
_, err = seekerObj.Seek(offset, os.SEEK_SET)
return r, err
}
_ = w.rc.Close()
return w.r.cacheFs().OpenRateLimited(func() (io.ReadCloser, error) {
r, err = w.r.cachedObject.Object.Open(&fs.SeekOption{Offset: offset}, &fs.RangeOption{Start: offset, End: end})
if err != nil {
return nil, err
}
return r, nil
})
}
func (w *worker) isRunning() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.running
}
func (w *worker) setRunning(f bool) {
w.mu.Lock()
defer w.mu.Unlock()
w.running = f
}
// run is the main loop for the worker which receives offsets to preload
func (w *worker) run() {
var err error
var data []byte
defer w.setRunning(false)
defer func() {
if w.rc != nil {
_ = w.rc.Close()
w.setRunning(false)
}
}()
for {
chunkStart, open := <-w.ch
w.setRunning(true)
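// a negative offset is the stop sentinel sent by scaleWorkers when scaling in,
// and !open means the preload queue was closed by Handle.Close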
if chunkStart < 0 || !open {
break
}
// skip if it exists
if w.r.UseMemory {
if w.r.memory.HasChunk(w.r.cachedObject, chunkStart) {
continue
}
// add it in ram if it's in the persistent storage
data, err = w.r.storage().GetChunk(w.r.cachedObject, chunkStart)
if err == nil {
err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
if err != nil {
fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
} else {
continue
}
}
err = nil
} else {
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
}
}
chunkEnd := chunkStart + w.r.cacheFs().chunkSize
// TODO: Remove this comment if it proves to be reliable for #1896
//if chunkEnd > w.r.cachedObject.Size() {
// chunkEnd = w.r.cachedObject.Size()
//}
w.download(chunkStart, chunkEnd, 0)
}
}
func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
var err error
var data []byte
// stop retries
if retry >= w.r.cacheFs().readRetries {
return
}
// back-off between retries
if retry > 0 {
time.Sleep(time.Second * time.Duration(retry))
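// linear back-off: 1s before the second attempt, 2s before the third, and so on up to readRetries attempts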
}
w.rc, err = w.reader(chunkStart, chunkEnd)
// we seem to be getting only errors so we abort
if err != nil {
fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
w.download(chunkStart, chunkEnd, retry+1)
return
}
data = make([]byte, chunkEnd-chunkStart)
sourceRead := 0
sourceRead, err = io.ReadFull(w.rc, data)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
w.download(chunkStart, chunkEnd, retry+1)
return
}
data = data[:sourceRead] // reslice to remove extra garbage
if err == io.ErrUnexpectedEOF {
fs.Debugf(w, "partial downloaded chunk %v", fs.SizeSuffix(chunkStart))
} else {
fs.Debugf(w, "downloaded chunk %v", fs.SizeSuffix(chunkStart))
}
if w.r.UseMemory {
err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
if err != nil {
fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
}
}
err = w.r.storage().AddChunk(w.r.cachedObject.abs(), data, chunkStart)
if err != nil {
fs.Errorf(w, "failed caching chunk in storage %v: %v", chunkStart, err)
}
}
// Check the interfaces are satisfied
var (
_ io.ReadCloser = (*Handle)(nil)
_ io.Seeker = (*Handle)(nil)
)

315
backend/cache/object.go vendored Normal file

@@ -0,0 +1,315 @@
// +build !plan9,go1.7

package cache
import (
"encoding/json"
"io"
"os"
"path"
"sync"
"time"
"strconv"
"github.com/ncw/rclone/fs"
)
// Object is a generic file like object that stores basic information about it
type Object struct {
fs.Object `json:"-"`
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the object
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheStorable bool `json:"storable"` // says whether this object can be stored
CacheType string `json:"cacheType"`
CacheTs time.Time `json:"cacheTs"`
cacheHashes map[fs.HashType]string // all supported hashes cached
refreshMutex sync.Mutex
}
// NewObject builds an empty cached Object for the given remote
func NewObject(f *Fs, remote string) *Object {
fullRemote := path.Join(f.Root(), remote)
dir, name := path.Split(fullRemote)
co := &Object{
CacheFs: f,
Name: cleanPath(name),
Dir: cleanPath(dir),
CacheModTime: time.Now().UnixNano(),
CacheSize: 0,
CacheStorable: false,
CacheType: "Object",
CacheTs: time.Now(),
}
return co
}
// MarshalJSON is needed to override the hashes map (needed to support older versions of Go)
func (o *Object) MarshalJSON() ([]byte, error) {
hashes := make(map[string]string)
for k, v := range o.cacheHashes {
hashes[strconv.Itoa(int(k))] = v
}
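// Alias has the same fields as Object but none of its methods, so marshalling it
// below does not recurse back into this MarshalJSON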
type Alias Object
return json.Marshal(&struct {
Hashes map[string]string `json:"hashes"`
*Alias
}{
Alias: (*Alias)(o),
Hashes: hashes,
})
}
// UnmarshalJSON is needed to override the CacheHashes map (needed to support older versions of Go)
func (o *Object) UnmarshalJSON(b []byte) error {
type Alias Object
aux := &struct {
Hashes map[string]string `json:"hashes"`
*Alias
}{
Alias: (*Alias)(o),
}
if err := json.Unmarshal(b, &aux); err != nil {
return err
}
o.cacheHashes = make(map[fs.HashType]string)
for k, v := range aux.Hashes {
ht, _ := strconv.Atoi(k)
o.cacheHashes[fs.HashType(ht)] = v
}
return nil
}
// ObjectFromOriginal builds one from a generic fs.Object
func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
var co *Object
fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
dir, name := path.Split(fullRemote)
co = &Object{
CacheFs: f,
Name: cleanPath(name),
Dir: cleanPath(dir),
CacheType: "Object",
CacheTs: time.Now(),
}
co.updateData(o)
return co
}
func (o *Object) updateData(source fs.Object) {
o.Object = source
o.CacheModTime = source.ModTime().UnixNano()
o.CacheSize = source.Size()
o.CacheStorable = source.Storable()
o.CacheTs = time.Now()
o.cacheHashes = make(map[fs.HashType]string)
}
// Fs returns its FS info
func (o *Object) Fs() fs.Info {
return o.CacheFs
}
// String returns a human friendly name for this object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
p := path.Join(o.Dir, o.Name)
if o.CacheFs.Root() != "" {
p = p[len(o.CacheFs.Root()):] // trim out root
if len(p) > 0 { // remove first separator
p = p[1:]
}
}
return p
}
// abs returns the absolute path to the object
func (o *Object) abs() string {
return path.Join(o.Dir, o.Name)
}
// parentRemote returns the absolute path parent remote
func (o *Object) parentRemote() string {
absPath := o.abs()
return cleanPath(path.Dir(absPath))
}
// parentDir returns the cached Directory of the object's parent
func (o *Object) parentDir() *Directory {
return NewDirectory(o.CacheFs, cleanPath(path.Dir(o.Remote())))
}
// ModTime returns the cached ModTime
func (o *Object) ModTime() time.Time {
return time.Unix(0, o.CacheModTime)
}
// Size returns the cached Size
func (o *Object) Size() int64 {
return o.CacheSize
}
// Storable returns the cached Storable
func (o *Object) Storable() bool {
return o.CacheStorable
}
// refreshFromSource requests the original FS for the object in case it comes from a cached entry
func (o *Object) refreshFromSource() error {
o.refreshMutex.Lock()
defer o.refreshMutex.Unlock()
if o.Object != nil {
return nil
}
liveObject, err := o.CacheFs.Fs.NewObject(o.Remote())
if err != nil {
fs.Errorf(o, "error refreshing object: %v", err)
return err
}
o.updateData(liveObject)
o.persist()
return nil
}
// SetModTime sets the ModTime of this object
func (o *Object) SetModTime(t time.Time) error {
if err := o.refreshFromSource(); err != nil {
return err
}
err := o.Object.SetModTime(t)
if err != nil {
return err
}
o.CacheModTime = t.UnixNano()
o.persist()
fs.Debugf(o.Fs(), "updated ModTime %v: %v", o, t)
return nil
}
// Open is used to request a specific part of the file using fs.SeekOption or fs.RangeOption
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
if err := o.refreshFromSource(); err != nil {
return nil, err
}
var err error
cacheReader := NewObjectHandle(o)
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
_, err = cacheReader.Seek(x.Offset, os.SEEK_SET)
case *fs.RangeOption:
_, err = cacheReader.Seek(x.Start, os.SEEK_SET)
}
if err != nil {
return cacheReader, err
}
}
return cacheReader, nil
}
// Update will change the object data
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if err := o.refreshFromSource(); err != nil {
return err
}
fs.Infof(o, "updating object contents with size %v", src.Size())
// deleting cached chunks and info to be replaced with new ones
_ = o.CacheFs.cache.RemoveObject(o.abs())
err := o.Object.Update(in, src, options...)
if err != nil {
fs.Errorf(o, "error updating source: %v", err)
return err
}
o.CacheModTime = src.ModTime().UnixNano()
o.CacheSize = src.Size()
o.cacheHashes = make(map[fs.HashType]string)
o.persist()
return nil
}
// Remove deletes the object from both the cache and the source
func (o *Object) Remove() error {
if err := o.refreshFromSource(); err != nil {
return err
}
err := o.Object.Remove()
if err != nil {
return err
}
fs.Infof(o, "removing object")
_ = o.CacheFs.cache.RemoveObject(o.abs())
return err
}
// Hash requests a hash of the object and stores in the cache
// since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ht fs.HashType) (string, error) {
if o.cacheHashes == nil {
o.cacheHashes = make(map[fs.HashType]string)
}
cachedHash, found := o.cacheHashes[ht]
if found {
return cachedHash, nil
}
if err := o.refreshFromSource(); err != nil {
return "", err
}
liveHash, err := o.Object.Hash(ht)
if err != nil {
return "", err
}
o.cacheHashes[ht] = liveHash
o.persist()
fs.Debugf(o, "object hash cached: %v", liveHash)
return liveHash, nil
}
// persist adds this object to the persistent cache
func (o *Object) persist() *Object {
err := o.CacheFs.cache.AddObject(o)
if err != nil {
fs.Errorf(o, "failed to cache object: %v", err)
}
return o
}
var (
_ fs.Object = (*Object)(nil)
)

244
backend/cache/plex.go vendored Normal file

@@ -0,0 +1,244 @@
// +build !plan9,go1.7

package cache
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"sync"
"github.com/ncw/rclone/fs"
)
const (
// defPlexLoginURL is the default URL for Plex login
defPlexLoginURL = "https://plex.tv/users/sign_in.json"
)
// plexConnector is managing the cache integration with Plex
type plexConnector struct {
url *url.URL
username string
password string
token string
f *Fs
mu sync.Mutex
}
// newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
}
pc := &plexConnector{
f: f,
url: u,
username: username,
password: password,
token: "",
}
return pc, nil
}
// newPlexConnectorWithToken connects to a Plex server using an existing token
func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
}
pc := &plexConnector{
f: f,
url: u,
token: token,
}
return pc, nil
}
// fillDefaultHeaders will add common headers to requests
func (p *plexConnector) fillDefaultHeaders(req *http.Request) {
req.Header.Add("X-Plex-Client-Identifier", fmt.Sprintf("rclone (%v)", p.f.String()))
req.Header.Add("X-Plex-Product", fmt.Sprintf("rclone (%v)", p.f.Name()))
req.Header.Add("X-Plex-Version", fs.Version)
req.Header.Add("Accept", "application/json")
if p.token != "" {
req.Header.Add("X-Plex-Token", p.token)
}
}
// authenticate will generate a token based on a username/password
func (p *plexConnector) authenticate() error {
p.mu.Lock()
defer p.mu.Unlock()
form := url.Values{}
form.Set("user[login]", p.username)
form.Add("user[password]", p.password)
req, err := http.NewRequest("POST", defPlexLoginURL, strings.NewReader(form.Encode()))
if err != nil {
return err
}
p.fillDefaultHeaders(req)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil {
return fmt.Errorf("failed to obtain token: %v", err)
}
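// the sign-in response is expected to carry the token at {"user": {"authToken": "..."}}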
tokenGen, ok := get(data, "user", "authToken")
if !ok {
return fmt.Errorf("failed to obtain token: %v", data)
}
token, ok := tokenGen.(string)
if !ok {
return fmt.Errorf("failed to obtain token: %v", data)
}
p.token = token
if p.token != "" {
fs.ConfigFileSet(p.f.Name(), "plex_token", p.token)
fs.SaveConfig()
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
}
return nil
}
// isConnected checks if this rclone is authenticated to Plex
func (p *plexConnector) isConnected() bool {
return p.token != ""
}
// isConfigured checks if this rclone is configured to use a Plex server
func (p *plexConnector) isConfigured() bool {
return p.url != nil
}
func (p *plexConnector) isPlaying(co *Object) bool {
isPlaying := false
req, err := http.NewRequest("GET", fmt.Sprintf("%s/status/sessions", p.url.String()), nil)
if err != nil {
return false
}
p.fillDefaultHeaders(req)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return false
}
var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil {
return false
}
sizeGen, ok := get(data, "MediaContainer", "size")
if !ok {
return false
}
size, ok := sizeGen.(float64)
if !ok || size < float64(1) {
return false
}
videosGen, ok := get(data, "MediaContainer", "Video")
if !ok {
fs.Errorf("plex", "empty videos: %v", data)
return false
}
videos, ok := videosGen.([]interface{})
if !ok || len(videos) < 1 {
fs.Errorf("plex", "empty videos: %v", data)
return false
}
for _, v := range videos {
keyGen, ok := get(v, "key")
if !ok {
fs.Errorf("plex", "failed to find: key")
continue
}
key, ok := keyGen.(string)
if !ok {
fs.Errorf("plex", "failed to understand: key")
continue
}
req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", p.url.String(), key), nil)
if err != nil {
return false
}
p.fillDefaultHeaders(req)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return false
}
var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil {
return false
}
remote := co.Remote()
if cr, yes := co.CacheFs.isWrappedByCrypt(); yes {
remote, err = cr.DecryptFileName(co.Remote())
if err != nil {
fs.Errorf("plex", "can not decrypt wrapped file: %v", err)
continue
}
}
fpGen, ok := get(data, "MediaContainer", "Metadata", 0, "Media", 0, "Part", 0, "file")
if !ok {
fs.Errorf("plex", "failed to understand: %v", data)
continue
}
fp, ok := fpGen.(string)
if !ok {
fs.Errorf("plex", "failed to understand: %v", fp)
continue
}
if strings.Contains(fp, remote) {
isPlaying = true
break
}
}
return isPlaying
}
func (p *plexConnector) isPlayingAsync(co *Object, response chan bool) {
time.Sleep(time.Second) // FIXME random guess here
res := p.isPlaying(co)
response <- res
}
// adapted from: https://stackoverflow.com/a/28878037 (credit)
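// For example, isPlaying above walks the decoded JSON with
//   get(data, "MediaContainer", "Metadata", 0, "Media", 0, "Part", 0, "file")
// mixing string keys for maps and int indexes for slices without manual type assertions at every level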
func get(m interface{}, path ...interface{}) (interface{}, bool) {
for _, p := range path {
switch idx := p.(type) {
case string:
if mm, ok := m.(map[string]interface{}); ok {
if val, found := mm[idx]; found {
m = val
continue
}
}
return nil, false
case int:
if mm, ok := m.([]interface{}); ok {
if len(mm) > idx {
m = mm[idx]
continue
}
}
return nil, false
}
}
return m, true
}

100
backend/cache/storage_memory.go vendored Normal file

@@ -0,0 +1,100 @@
// +build !plan9,go1.7

package cache
import (
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
)
// Memory is a wrapper of transient storage for a go-cache store
type Memory struct {
ChunkStorage
db *cache.Cache
}
// NewMemory builds this cache storage
// defaultExpiration will set the expiry time of chunks in this storage
func NewMemory(defaultExpiration time.Duration) *Memory {
mem := &Memory{}
err := mem.Connect(defaultExpiration)
if err != nil {
fs.Errorf("cache", "can't open ram connection: %v", err)
}
return mem
}
// Connect will create a connection for the storage
func (m *Memory) Connect(defaultExpiration time.Duration) error {
m.db = cache.New(defaultExpiration, -1)
return nil
}
// HasChunk confirms the existence of a single chunk of an object
func (m *Memory) HasChunk(cachedObject *Object, offset int64) bool {
key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10)
_, found := m.db.Get(key)
return found
}
// GetChunk will retrieve a single chunk which belongs to a cached object or an error if it doesn't find it
func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10)
var data []byte
if x, found := m.db.Get(key); found {
data = x.([]byte)
return data, nil
}
return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
}
// AddChunk adds a new chunk of a cached object
func (m *Memory) AddChunk(fp string, data []byte, offset int64) error {
return m.AddChunkAhead(fp, data, offset, time.Second)
}
// AddChunkAhead adds a new chunk of a cached object
func (m *Memory) AddChunkAhead(fp string, data []byte, offset int64, t time.Duration) error {
key := fp + "-" + strconv.FormatInt(offset, 10)
m.db.Set(key, data, cache.DefaultExpiration)
return nil
}
// CleanChunksByAge will cleanup on a cron basis
func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {
m.db.DeleteExpired()
}
// CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
func (m *Memory) CleanChunksByNeed(offset int64) {
var items map[string]cache.Item
items = m.db.Items()
for key := range items {
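// chunk keys have the form "<absolute path>-<offset>" (see AddChunkAhead), e.g.
// "media/movies/a.mkv-1048576" (illustrative), so everything after the last "-" is the offset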
sepIdx := strings.LastIndex(key, "-")
keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
if err != nil {
fs.Errorf("cache", "couldn't parse offset entry %v", key)
continue
}
if keyOffset < offset {
m.db.Delete(key)
}
}
}
// CleanChunksBySize will cleanup chunks after the total size passes a certain point
func (m *Memory) CleanChunksBySize(maxSize int64) {
// NOOP
}

721
backend/cache/storage_persistent.go vendored Normal file

@@ -0,0 +1,721 @@
// +build !plan9,go1.7

package cache
import (
"bytes"
"encoding/binary"
"encoding/json"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
bolt "github.com/coreos/bbolt"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// Constants
const (
RootBucket = "root"
RootTsBucket = "rootTs"
DataTsBucket = "dataTs"
)
// Features flags for this storage type
type Features struct {
PurgeDb bool // purge the db before starting
}
var boltMap = make(map[string]*Persistent)
var boltMapMx sync.Mutex
// GetPersistent returns a single instance for the specific store
func GetPersistent(dbPath, chunkPath string, f *Features) (*Persistent, error) {
// write lock to create one
boltMapMx.Lock()
defer boltMapMx.Unlock()
if b, ok := boltMap[dbPath]; ok {
return b, nil
}
bb, err := newPersistent(dbPath, chunkPath, f)
if err != nil {
return nil, err
}
boltMap[dbPath] = bb
return boltMap[dbPath], nil
}
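// Example sketch: the same db path always maps to the same instance (the
// paths are assumptions for illustration):
//
//	p1, _ := GetPersistent("/tmp/cache.db", "/tmp/cache-data", &Features{})
//	p2, _ := GetPersistent("/tmp/cache.db", "/tmp/cache-data", &Features{})
//	// p1 == p2: both point at the shared instance stored in boltMap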
type chunkInfo struct {
Path string
Offset int64
Size int64
}
// Persistent is a wrapper of persistent storage for a bolt.DB file
type Persistent struct {
Storage
dbPath string
dataPath string
db *bolt.DB
cleanupMux sync.Mutex
features *Features
}
// newPersistent builds a new wrapper and connects to the bolt.DB file
func newPersistent(dbPath, chunkPath string, f *Features) (*Persistent, error) {
b := &Persistent{
dbPath: dbPath,
dataPath: chunkPath,
features: f,
}
err := b.Connect()
if err != nil {
fs.Errorf(dbPath, "Error opening storage cache. Is there another rclone running on the same remote? %v", err)
return nil, err
}
return b, nil
}
// String will return a human friendly string for this DB (currently the dbPath)
func (b *Persistent) String() string {
return "<Cache DB> " + b.dbPath
}
// Connect creates a connection to the configured file
// if the PurgeDb feature flag is set, the existing DB file and chunk data are removed first so the DB starts empty
func (b *Persistent) Connect() error {
var db *bolt.DB
var err error
if b.features.PurgeDb {
err := os.Remove(b.dbPath)
if err != nil {
fs.Errorf(b, "failed to remove cache file: %v", err)
}
err = os.RemoveAll(b.dataPath)
if err != nil {
fs.Errorf(b, "failed to remove cache data: %v", err)
}
}
err = os.MkdirAll(b.dataPath, os.ModePerm)
if err != nil {
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
}
db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: 1 * time.Second})
if err != nil {
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
}
_ = db.Update(func(tx *bolt.Tx) error {
_, _ = tx.CreateBucketIfNotExists([]byte(RootBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(RootTsBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(DataTsBucket))
return nil
})
b.db = db
return nil
}
// getBucket cleans a path of the form /var/tmp and iterates through each path component
// to reach the nested bucket of the final part (in this example: tmp)
func (b *Persistent) getBucket(dir string, createIfMissing bool, tx *bolt.Tx) *bolt.Bucket {
dir = cleanPath(dir)
entries := strings.FieldsFunc(dir, func(c rune) bool {
return os.PathSeparator == c
})
bucket := tx.Bucket([]byte(RootBucket))
for _, entry := range entries {
if createIfMissing {
bucket, _ = bucket.CreateBucketIfNotExists([]byte(entry))
} else {
bucket = bucket.Bucket([]byte(entry))
}
if bucket == nil {
return nil
}
}
return bucket
}
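// Example sketch: a path like "movies/2017" resolves to the nested buckets
// root -> "movies" -> "2017", looked up inside a bolt transaction (the path
// and the p variable are assumptions for illustration):
//
//	_ = p.db.View(func(tx *bolt.Tx) error {
//		if bucket := p.getBucket("movies/2017", false, tx); bucket != nil {
//			// the "." key holds the directory metadata, other keys are entries
//		}
//		return nil
//	})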
// AddDir will create or update a CachedDirectory's metadata in its bucket
func (b *Persistent) AddDir(cachedDir *Directory) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedDir.abs(), true, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", cachedDir)
}
encoded, err := json.Marshal(cachedDir)
if err != nil {
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
}
err = bucket.Put([]byte("."), encoded)
if err != nil {
return err
}
return nil
})
}
// GetDirEntries will return a CachedDirectory, its list of dir entries and/or an error if it encountered issues
func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error) {
var dirEntries fs.DirEntries
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedDir.abs(), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
}
val := bucket.Get([]byte("."))
if val != nil {
err := json.Unmarshal(val, cachedDir)
if err != nil {
return errors.Errorf("error during unmarshalling obj: %v", err)
}
} else {
return errors.Errorf("missing cached dir: %v", cachedDir)
}
c := bucket.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
// ignore metadata key: .
if bytes.Equal(k, []byte(".")) {
continue
}
entryPath := path.Join(cachedDir.Remote(), string(k))
if v == nil { // directory
// we try to find a cached meta for the dir
currentBucket := c.Bucket().Bucket(k)
if currentBucket == nil {
return errors.Errorf("couldn't open bucket (%v)", string(k))
}
metaKey := currentBucket.Get([]byte("."))
d := NewDirectory(cachedDir.CacheFs, entryPath)
if metaKey != nil { //if we don't find it, we create an empty dir
err := json.Unmarshal(metaKey, d)
if err != nil { // if even this fails, we fallback to an empty dir
fs.Debugf(string(k), "error during unmarshalling obj: %v", err)
}
}
dirEntries = append(dirEntries, d)
} else { // object
o := NewObject(cachedDir.CacheFs, entryPath)
err := json.Unmarshal(v, o)
if err != nil {
fs.Debugf(string(k), "error during unmarshalling obj: %v", err)
continue
}
dirEntries = append(dirEntries, o)
}
}
return nil
})
return dirEntries, err
}
// RemoveDir will delete a CachedDirectory, all its objects and all the chunks stored for it
func (b *Persistent) RemoveDir(fp string) error {
var err error
parentDir, dirName := path.Split(fp)
if fp == "" {
err = b.db.Update(func(tx *bolt.Tx) error {
err := tx.DeleteBucket([]byte(RootBucket))
if err != nil {
fs.Debugf(fp, "couldn't delete from cache: %v", err)
return err
}
_, _ = tx.CreateBucketIfNotExists([]byte(RootBucket))
return nil
})
} else {
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", fp)
}
// delete the cached dir
err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
if err != nil {
fs.Debugf(fp, "couldn't delete from cache: %v", err)
}
return nil
})
}
// delete chunks on disk
// safe to ignore as the files might not have been open
if err == nil {
_ = os.RemoveAll(path.Join(b.dataPath, fp))
_ = os.MkdirAll(b.dataPath, os.ModePerm)
}
return err
}
// ExpireDir will mark a CachedDirectory and all its parent directories as expired in the cache
// chunks will remain as they are
func (b *Persistent) ExpireDir(cd *Directory) error {
t := time.Now().Add(cd.CacheFs.fileAge * -1)
cd.CacheTs = &t
// expire all parents
return b.db.Update(func(tx *bolt.Tx) error {
// expire all the parents
currentDir := cd.abs()
for { // until we get to the root
bucket := b.getBucket(currentDir, false, tx)
if bucket != nil {
val := bucket.Get([]byte("."))
if val != nil {
cd2 := &Directory{CacheFs: cd.CacheFs}
err := json.Unmarshal(val, cd2)
if err == nil {
fs.Debugf(cd, "cache: expired %v", currentDir)
cd2.CacheTs = &t
enc2, _ := json.Marshal(cd2)
_ = bucket.Put([]byte("."), enc2)
}
}
}
if currentDir == "" {
break
}
currentDir = cleanPath(path.Dir(currentDir))
}
return nil
})
}
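// Example sketch: expiring a nested directory also rewinds the CacheTs of
// every parent up to the root, so the next listing of any ancestor is
// refreshed (cfs, p and the path are assumptions for illustration):
//
//	d := NewDirectory(cfs, "a/b/c")
//	_ = p.ExpireDir(d) // "a/b", "a" and the root entry are re-stamped too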
// GetObject will return a CachedObject from its parent directory or an error if it doesn't find it
func (b *Persistent) GetObject(cachedObject *Object) (err error) {
return b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
}
val := bucket.Get([]byte(cachedObject.Name))
if val != nil {
return json.Unmarshal(val, cachedObject)
}
return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
})
}
// AddObject will create a cached object in its parent directory
func (b *Persistent) AddObject(cachedObject *Object) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, true, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
}
// cache Object Info
encoded, err := json.Marshal(cachedObject)
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
}
err = bucket.Put([]byte(cachedObject.Name), encoded)
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
}
return nil
})
}
// RemoveObject will delete a single cached object and all the chunks which belong to it
func (b *Persistent) RemoveObject(fp string) error {
parentDir, objName := path.Split(fp)
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
}
err := bucket.Delete([]byte(cleanPath(objName)))
if err != nil {
fs.Debugf(fp, "couldn't delete obj from storage: %v", err)
}
// delete chunks on disk
// safe to ignore as the file might not have been open
_ = os.RemoveAll(path.Join(b.dataPath, fp))
return nil
})
}
// HasEntry confirms the existence of a single entry (dir or object)
func (b *Persistent) HasEntry(remote string) bool {
dir, name := path.Split(remote)
dir = cleanPath(dir)
name = cleanPath(name)
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(dir, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", remote)
}
if f := bucket.Bucket([]byte(name)); f != nil {
return nil
}
if f := bucket.Get([]byte(name)); f != nil {
return nil
}
return errors.Errorf("couldn't find object (%v)", remote)
})
if err == nil {
return true
}
return false
}
// HasChunk confirms the existence of a single chunk of an object
func (b *Persistent) HasChunk(cachedObject *Object, offset int64) bool {
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
if _, err := os.Stat(fp); !os.IsNotExist(err) {
return true
}
return false
}
// GetChunk will retrieve a single chunk which belongs to a cached object, or return an error if it can't find it
func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
var data []byte
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
data, err := ioutil.ReadFile(fp)
if err != nil {
return nil, err
}
return data, err
}
// AddChunk adds a new chunk of a cached object
func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
err := ioutil.WriteFile(filePath, data, os.ModePerm)
if err != nil {
return err
}
return b.db.Update(func(tx *bolt.Tx) error {
tsBucket := tx.Bucket([]byte(DataTsBucket))
ts := time.Now()
found := false
// delete (older) timestamps for the same object
c := tsBucket.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
var ci chunkInfo
err = json.Unmarshal(v, &ci)
if err != nil {
continue
}
if ci.Path == fp && ci.Offset == offset {
if tsInCache := time.Unix(0, btoi(k)); tsInCache.After(ts) && !found {
found = true
continue
}
err := c.Delete()
if err != nil {
fs.Debugf(fp, "failed to clean chunk: %v", err)
}
}
}
// don't overwrite if a newer one is already there
if found {
return nil
}
enc, err := json.Marshal(chunkInfo{Path: fp, Offset: offset, Size: int64(len(data))})
if err != nil {
fs.Debugf(fp, "failed to timestamp chunk: %v", err)
}
err = tsBucket.Put(itob(ts.UnixNano()), enc)
if err != nil {
fs.Debugf(fp, "failed to timestamp chunk: %v", err)
}
return nil
})
}
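// Example sketch: a chunk for "movies/file.mkv" at offset 0 is written to
// "<dataPath>/movies/file.mkv/0" and timestamped in DataTsBucket (the path,
// p and chunkData variables are assumptions for illustration):
//
//	_ = p.AddChunk("movies/file.mkv", chunkData, 0)
//	if ts, err := p.GetChunkTs("movies/file.mkv", 0); err == nil {
//		fs.Infof("cache", "chunk cached at %v", ts)
//	}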
// CleanChunksByAge will cleanup chunks on a cron basis (a NOOP for this storage)
func (b *Persistent) CleanChunksByAge(chunkAge time.Duration) {
// NOOP
}
// CleanChunksByNeed is a noop for this implementation
func (b *Persistent) CleanChunksByNeed(offset int64) {
// noop: we want to clean a Bolt DB by time only
}
// CleanChunksBySize will cleanup chunks after the total size passes a certain point
func (b *Persistent) CleanChunksBySize(maxSize int64) {
b.cleanupMux.Lock()
defer b.cleanupMux.Unlock()
var cntChunks int
err := b.db.Update(func(tx *bolt.Tx) error {
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
if dataTsBucket == nil {
return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
}
// iterate through ts
c := dataTsBucket.Cursor()
totalSize := int64(0)
for k, v := c.First(); k != nil; k, v = c.Next() {
var ci chunkInfo
err := json.Unmarshal(v, &ci)
if err != nil {
continue
}
totalSize += ci.Size
}
if totalSize > maxSize {
needToClean := totalSize - maxSize
for k, v := c.First(); k != nil; k, v = c.Next() {
var ci chunkInfo
err := json.Unmarshal(v, &ci)
if err != nil {
continue
}
// delete this ts entry
err = c.Delete()
if err != nil {
fs.Errorf(ci.Path, "failed deleting chunk ts during cleanup (%v): %v", ci.Offset, err)
continue
}
err = os.Remove(path.Join(b.dataPath, ci.Path, strconv.FormatInt(ci.Offset, 10)))
if err == nil {
cntChunks++
needToClean -= ci.Size
if needToClean <= 0 {
break
}
}
}
}
fs.Infof("cache", "deleted (%v) chunks", cntChunks)
return nil
})
if err != nil {
if err == bolt.ErrDatabaseNotOpen {
// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
return
}
fs.Errorf("cache", "cleanup failed: %v", err)
}
}
// Stats returns a go map with the stats key values
func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
r := make(map[string]map[string]interface{})
r["data"] = make(map[string]interface{})
r["data"]["oldest-ts"] = time.Now()
r["data"]["oldest-file"] = ""
r["data"]["newest-ts"] = time.Now()
r["data"]["newest-file"] = ""
r["data"]["total-chunks"] = 0
r["data"]["total-size"] = int64(0)
r["files"] = make(map[string]interface{})
r["files"]["oldest-ts"] = time.Now()
r["files"]["oldest-name"] = ""
r["files"]["newest-ts"] = time.Now()
r["files"]["newest-name"] = ""
r["files"]["total-files"] = 0
_ = b.db.View(func(tx *bolt.Tx) error {
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
rootTsBucket := tx.Bucket([]byte(RootTsBucket))
var totalDirs int
var totalFiles int
_ = b.iterateBuckets(tx.Bucket([]byte(RootBucket)), func(name string) {
totalDirs++
}, func(key string, val []byte) {
totalFiles++
})
r["files"]["total-dir"] = totalDirs
r["files"]["total-files"] = totalFiles
c := dataTsBucket.Cursor()
totalChunks := 0
totalSize := int64(0)
for k, v := c.First(); k != nil; k, v = c.Next() {
var ci chunkInfo
err := json.Unmarshal(v, &ci)
if err != nil {
continue
}
totalChunks++
totalSize += ci.Size
}
r["data"]["total-chunks"] = totalChunks
r["data"]["total-size"] = totalSize
if k, v := c.First(); k != nil {
var ci chunkInfo
_ = json.Unmarshal(v, &ci)
r["data"]["oldest-ts"] = time.Unix(0, btoi(k))
r["data"]["oldest-file"] = ci.Path
}
if k, v := c.Last(); k != nil {
var ci chunkInfo
_ = json.Unmarshal(v, &ci)
r["data"]["newest-ts"] = time.Unix(0, btoi(k))
r["data"]["newest-file"] = ci.Path
}
c = rootTsBucket.Cursor()
if k, v := c.First(); k != nil {
// split to get (abs path - offset)
r["files"]["oldest-ts"] = time.Unix(0, btoi(k))
r["files"]["oldest-name"] = string(v)
}
if k, v := c.Last(); k != nil {
r["files"]["newest-ts"] = time.Unix(0, btoi(k))
r["files"]["newest-name"] = string(v)
}
return nil
})
return r, nil
}
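// Example sketch: the returned map is two levels deep ("data" and "files"),
// so callers can range over both (the p variable is an assumption for
// illustration):
//
//	stats, _ := p.Stats()
//	for section, kv := range stats {
//		for key, val := range kv {
//			fs.Infof("cache", "%s.%s = %v", section, key, val)
//		}
//	}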
// Purge will flush the entire cache
func (b *Persistent) Purge() {
b.cleanupMux.Lock()
defer b.cleanupMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(RootBucket))
_ = tx.DeleteBucket([]byte(RootTsBucket))
_ = tx.DeleteBucket([]byte(DataTsBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(RootBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(RootTsBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(DataTsBucket))
return nil
})
err := os.RemoveAll(b.dataPath)
if err != nil {
fs.Errorf(b, "issue removing data folder: %v", err)
}
err = os.MkdirAll(b.dataPath, os.ModePerm)
if err != nil {
fs.Errorf(b, "issue removing data folder: %v", err)
}
}
// GetChunkTs retrieves the current timestamp of this chunk
func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
var t time.Time
err := b.db.View(func(tx *bolt.Tx) error {
tsBucket := tx.Bucket([]byte(DataTsBucket))
c := tsBucket.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
var ci chunkInfo
err := json.Unmarshal(v, &ci)
if err != nil {
continue
}
if ci.Path == path && ci.Offset == offset {
t = time.Unix(0, btoi(k))
return nil
}
}
return errors.Errorf("not found %v-%v", path, offset)
})
return t, err
}
func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string), kvFn func(key string, val []byte)) error {
err := b.db.View(func(tx *bolt.Tx) error {
var c *bolt.Cursor
if buk == nil {
c = tx.Cursor()
} else {
c = buk.Cursor()
}
for k, v := c.First(); k != nil; k, v = c.Next() {
if v == nil {
var buk2 *bolt.Bucket
if buk == nil {
buk2 = tx.Bucket(k)
} else {
buk2 = buk.Bucket(k)
}
bucketFn(string(k))
_ = b.iterateBuckets(buk2, bucketFn, kvFn)
} else {
kvFn(string(k), v)
}
}
return nil
})
return err
}
// Close should be called when the program ends gracefully
func (b *Persistent) Close() {
b.cleanupMux.Lock()
defer b.cleanupMux.Unlock()
err := b.db.Close()
if err != nil {
fs.Errorf(b, "closing handle: %v", err)
}
}
// itob returns an 8-byte big endian representation of v.
func itob(v int64) []byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(v))
return b
}
// btoi converts an 8-byte big endian representation back into an int64
func btoi(d []byte) int64 {
return int64(binary.BigEndian.Uint64(d))
}
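// Example sketch: itob/btoi round-trip an int64, and the big endian encoding
// makes nanosecond timestamps sort chronologically as bolt keys:
//
//	now := time.Now().UnixNano()
//	key := itob(now)
//	same := btoi(key) // same == now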
// cloneBytes returns a copy of a given slice.
func cloneBytes(v []byte) []byte {
var clone = make([]byte, len(v))
copy(clone, v)
return clone
}