mirror of https://github.com/rclone/rclone.git
synced 2026-01-11 13:03:20 +00:00

Compare commits: experiment...onedrive-s (4 commits)

Commits: 108ec53c0c, 64e303321b, 7d87386d58, beb773d20c
@@ -1,17 +1,14 @@
# Maintainers guide for rclone #

Current active maintainers of rclone are:
Current active maintainers of rclone are

| Name              | GitHub ID   | Specific Responsibilities    |
| :---------------- | :---------- | :--------------------------- |
| Nick Craig-Wood   | @ncw        | overall project health       |
| Stefan Breunig    | @breunigs   |                              |
| Ishuah Kariuki    | @ishuah     |                              |
| Remus Bunduc      | @remusb     | cache backend                |
| Fabian Möller     | @B4dM4n     |                              |
| Alex Chen         | @Cnly       | onedrive backend             |
| Sandeep Ummadi    | @sandeepkru | azureblob backend            |
| Sebastian Bünger  | @buengese   | jottacloud & yandex backends |

* Nick Craig-Wood @ncw
* Stefan Breunig @breunigs
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n
* Alex Chen @Cnly
* Sandeep Ummadi @sandeepkru

**This is a work in progress Draft**
@@ -91,4 +91,4 @@ License
-------

This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).
[COPYING file](/rclone/COPYING) included in this package).
@@ -417,8 +417,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        }
        _, err := f.NewObject(remote)
        if err != nil {
            if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
                // File doesn't exist or is a directory so return old f
            if err == fs.ErrorObjectNotFound {
                // File doesn't exist so return old f
                f.root = oldRoot
                return f, nil
            }
@@ -474,21 +474,6 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
    o.meta[modTimeKey] = modTime.Format(timeFormatOut)
}

// Returns whether file is a directory marker or not
func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool {
    // Directory markers are 0 length
    if size == 0 {
        // Note that metadata with hdi_isfolder = true seems to be a
        // de facto standard for marking blobs as directories.
        endsWithSlash := strings.HasSuffix(remote, "/")
        if endsWithSlash || remote == "" || metadata["hdi_isfolder"] == "true" {
            return true
        }
    }
    return false
}

// listFn is called from list to handle an object
type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
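A directory marker here is any zero-length blob whose name ends in "/", is the container root (""), or whose metadata carries hdi_isfolder = true. A minimal self-contained sketch of the same predicate and its edge cases (a plain map stands in for azblob.Metadata; the sample values are illustrative):

    package main

    import (
        "fmt"
        "strings"
    )

    // isDirMarker mirrors the predicate above: zero-length blobs that end in
    // "/", name the root (""), or carry hdi_isfolder=true count as markers.
    func isDirMarker(size int64, metadata map[string]string, remote string) bool {
        if size == 0 {
            if strings.HasSuffix(remote, "/") || remote == "" || metadata["hdi_isfolder"] == "true" {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(isDirMarker(0, nil, "photos/"))                                      // true: trailing slash
        fmt.Println(isDirMarker(0, map[string]string{"hdi_isfolder": "true"}, "photos")) // true: metadata flag
        fmt.Println(isDirMarker(42, nil, "photos/"))                                     // false: non-zero size
    }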
@@ -554,20 +539,26 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
                continue
            }
            remote := file.Name[len(f.root):]
            if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
                if strings.HasSuffix(remote, "/") {
                    remote = remote[:len(remote)-1]
            // is this a directory marker?
            if *file.Properties.ContentLength == 0 {
                // Note that metadata with hdi_isfolder = true seems to be a
                // de facto standard for marking blobs as directories.
                endsWithSlash := strings.HasSuffix(remote, "/")
                if endsWithSlash || remote == "" || file.Metadata["hdi_isfolder"] == "true" {
                    if endsWithSlash {
                        remote = remote[:len(remote)-1]
                    }
                    err = fn(remote, file, true)
                    if err != nil {
                        return err
                    }
                    // Keep track of directory markers. If recursing then
                    // there will be no Prefixes so no need to keep track
                    if !recurse {
                        directoryMarkers[remote] = struct{}{}
                    }
                    continue // skip directory marker
                }
                err = fn(remote, file, true)
                if err != nil {
                    return err
                }
                // Keep track of directory markers. If recursing then
                // there will be no Prefixes so no need to keep track
                if !recurse {
                    directoryMarkers[remote] = struct{}{}
                }
                continue // skip directory marker
            }
            // Send object
            err = fn(remote, file, false)
@@ -990,37 +981,27 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
//  o.md5
//  o.meta
func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
    metadata := info.NewMetadata()
    size := info.ContentLength()
    if isDirectoryMarker(size, metadata, o.remote) {
        return fs.ErrorNotAFile
    }
    // NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
    // this as base64 encoded string.
    o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
    o.mimeType = info.ContentType()
    o.size = size
    o.size = info.ContentLength()
    o.modTime = time.Time(info.LastModified())
    o.accessTier = azblob.AccessTierType(info.AccessTier())
    o.setMetadata(metadata)
    o.setMetadata(info.NewMetadata())

    return nil
}

func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
    metadata := info.Metadata
    size := *info.Properties.ContentLength
    if isDirectoryMarker(size, metadata, o.remote) {
        return fs.ErrorNotAFile
    }
    // NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain
    // this as base64 encoded string.
    o.md5 = base64.StdEncoding.EncodeToString(info.Properties.ContentMD5)
    o.mimeType = *info.Properties.ContentType
    o.size = size
    o.size = *info.Properties.ContentLength
    o.modTime = info.Properties.LastModified
    o.accessTier = info.Properties.AccessTier
    o.setMetadata(metadata)
    o.setMetadata(info.Metadata)
    return nil
}
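The NOTE above matters because the Azure client library hands back the raw digest bytes while the Object stores the checksum base64-encoded. A small illustrative sketch of that round-trip (the sample input is arbitrary):

    package main

    import (
        "crypto/md5"
        "encoding/base64"
        "fmt"
    )

    func main() {
        // The SDK returns the raw 16-byte digest ("base64 decoded" in the
        // comment's wording); the Object keeps it as a base64 string.
        sum := md5.Sum([]byte("hello"))                  // raw digest bytes
        enc := base64.StdEncoding.EncodeToString(sum[:]) // what o.md5 stores
        fmt.Println(enc) // XUFAKrxLKna5cZ2REBfFkg==
    }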
@@ -125,29 +125,6 @@ var (
    _linkTemplates   map[string]*template.Template // available link types
)

// Parse the scopes option returning a slice of scopes
func driveScopes(scopesString string) (scopes []string) {
    if scopesString == "" {
        scopesString = defaultScope
    }
    for _, scope := range strings.Split(scopesString, ",") {
        scope = strings.TrimSpace(scope)
        scopes = append(scopes, scopePrefix+scope)
    }
    return scopes
}

// Returns true if one of the scopes was "drive.appfolder"
func driveScopesContainsAppFolder(scopes []string) bool {
    for _, scope := range scopes {
        if scope == scopePrefix+"drive.appfolder" {
            return true
        }
    }
    return false
}

// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
@@ -162,14 +139,18 @@ func init() {
                fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
                return
            }

            // Fill in the scopes
            driveConfig.Scopes = driveScopes(opt.Scope)
            // Set the root_folder_id if using drive.appfolder
            if driveScopesContainsAppFolder(driveConfig.Scopes) {
                m.Set("root_folder_id", "appDataFolder")
            if opt.Scope == "" {
                opt.Scope = defaultScope
            }
            driveConfig.Scopes = nil
            for _, scope := range strings.Split(opt.Scope, ",") {
                driveConfig.Scopes = append(driveConfig.Scopes, scopePrefix+strings.TrimSpace(scope))
                // Set the root_folder_id if using drive.appfolder
                if scope == "drive.appfolder" {
                    m.Set("root_folder_id", "appDataFolder")
                }
            }

            if opt.ServiceAccountFile == "" {
                err = oauthutil.Config("drive", name, m, driveConfig)
                if err != nil {
@@ -775,8 +756,7 @@ func newPacer() *pacer.Pacer {
}

func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
    scopes := driveScopes(opt.Scope)
    conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
    conf, err := google.JWTConfigFromJSON(credentialsData, driveConfig.Scopes...)
    if err != nil {
        return nil, errors.Wrap(err, "error processing credentials")
    }
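Both variants feed scopes into golang.org/x/oauth2/google's JWT config. A minimal sketch of how such a service-account client is typically built; the key file path and scope below are placeholders, not values from this diff:

    package main

    import (
        "context"
        "io/ioutil"
        "log"

        "golang.org/x/oauth2/google"
    )

    func main() {
        // Placeholder path: a downloaded service-account JSON key.
        data, err := ioutil.ReadFile("service-account.json")
        if err != nil {
            log.Fatal(err)
        }
        // JWTConfigFromJSON takes the raw key plus the OAuth scopes.
        conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/drive")
        if err != nil {
            log.Fatal(err)
        }
        client := conf.Client(context.Background()) // *http.Client that refreshes its token
        _ = client
    }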
@@ -22,31 +22,6 @@ import (
    "google.golang.org/api/drive/v3"
)

func TestDriveScopes(t *testing.T) {
    for _, test := range []struct {
        in       string
        want     []string
        wantFlag bool
    }{
        {"", []string{
            "https://www.googleapis.com/auth/drive",
        }, false},
        {" drive.file , drive.readonly", []string{
            "https://www.googleapis.com/auth/drive.file",
            "https://www.googleapis.com/auth/drive.readonly",
        }, false},
        {" drive.file , drive.appfolder", []string{
            "https://www.googleapis.com/auth/drive.file",
            "https://www.googleapis.com/auth/drive.appfolder",
        }, true},
    } {
        got := driveScopes(test.in)
        assert.Equal(t, test.want, got, test.in)
        gotFlag := driveScopesContainsAppFolder(got)
        assert.Equal(t, test.wantFlag, gotFlag, test.in)
    }
}

/*
var additionalMimeTypes = map[string]string{
    "application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",
@@ -285,6 +285,7 @@ type AsyncOperationStatus struct {

// GetID returns a normalized ID of the item
// If DriveID is known it will be prefixed to the ID with # separator
// Can be parsed using onedrive.parseNormalizedID(normalizedID)
func (i *Item) GetID() string {
    if i.IsRemote() && i.RemoteItem.ID != "" {
        return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID
@@ -334,20 +334,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
    return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
    var opts rest.Opts
    if len(path) == 0 {
        opts = rest.Opts{
            Method: "GET",
            Path:   "/root",
        }
    } else {
        opts = rest.Opts{
            Method: "GET",
            Path:   "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
        }
    }
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
// if `relPath` == "", it reads the metadata for the item with that ID.
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
    opts := newOptsCall(normalizedID, "GET", ":/"+rest.URLPathEscape(replaceReservedChars(relPath)))
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(&opts, nil, &info)
        return shouldRetry(resp, err)
@@ -356,6 +346,72 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Respon
    return info, resp, err
}

// readMetaDataForPath reads the metadata from the path (relative to the absolute root)
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
    firstSlashIndex := strings.IndexRune(path, '/')

    if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
        var opts rest.Opts
        if len(path) == 0 {
            opts = rest.Opts{
                Method: "GET",
                Path:   "/root",
            }
        } else {
            opts = rest.Opts{
                Method: "GET",
                Path:   "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
            }
        }
        err = f.pacer.Call(func() (bool, error) {
            resp, err = f.srv.CallJSON(&opts, nil, &info)
            return shouldRetry(resp, err)
        })
        return info, resp, err
    }

    // The following branch handles the case when we're using OneDrive Personal and the path is in a folder.
    // For OneDrive Personal, we need to consider the "shared with me" folders.
    // An item in such a folder can only be addressed by its ID relative to the sharer's driveID or
    // by its path relative to the folder's ID relative to the sharer's driveID.
    // Note: A "shared with me" folder can only be placed in the sharee's absolute root.
    // So we read metadata relative to a suitable folder's normalized ID.
    var dirCacheFoundRoot bool
    var rootNormalizedID string
    if f.dirCache != nil {
        var ok bool
        if rootNormalizedID, ok = f.dirCache.Get(""); ok {
            dirCacheFoundRoot = true
        }
    }

    relPath, insideRoot := getRelativePathInsideBase(f.root, path)
    var firstDir, baseNormalizedID string
    if !insideRoot || !dirCacheFoundRoot {
        // We do not have the normalized ID in dirCache for our query to base on. Query it manually.
        firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
        info, resp, err := f.readMetaDataForPath(firstDir)
        if err != nil {
            return info, resp, err
        }
        baseNormalizedID = info.GetID()
    } else {
        if f.root != "" {
            // Read metadata based on root
            baseNormalizedID = rootNormalizedID
        } else {
            // Read metadata based on firstDir
            firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
            baseNormalizedID, err = f.dirCache.FindDir(firstDir, false)
            if err != nil {
                return nil, nil, err
            }
        }
    }

    return f.readMetaDataForPathRelativeToID(baseNormalizedID, relPath)
}

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
    // Decode error response
@@ -514,18 +570,11 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
    // fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
    parent, ok := f.dirCache.GetInv(pathID)
    _, ok := f.dirCache.GetInv(pathID)
    if !ok {
        return "", false, errors.New("couldn't find parent ID")
    }
    path := leaf
    if parent != "" {
        path = parent + "/" + path
    }
    if f.dirCache.FoundRoot() {
        path = f.rootSlash() + path
    }
    info, resp, err := f.readMetaDataForPath(path)
    info, resp, err := f.readMetaDataForPathRelativeToID(pathID, leaf)
    if err != nil {
        if resp != nil && resp.StatusCode == http.StatusNotFound {
            return "", false, nil
@@ -867,13 +916,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
    opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
    opts.NoResponse = true

    id, _, _ := parseDirID(directoryID)
    id, dstDriveID, _ := parseNormalizedID(directoryID)

    replacedLeaf := replaceReservedChars(leaf)
    copyReq := api.CopyItemRequest{
        Name: &replacedLeaf,
        ParentReference: api.ItemReference{
            DriveID: f.driveID,
            DriveID: dstDriveID,
            ID:      id,
        },
    }
@@ -940,15 +989,23 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
        return nil, err
    }

    id, dstDriveID, _ := parseNormalizedID(directoryID)
    _, srcObjDriveID, _ := parseNormalizedID(srcObj.id)

    if dstDriveID != srcObjDriveID {
        // https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
        // "Items cannot be moved between Drives using this request."
        return nil, fs.ErrorCantMove
    }

    // Move the object
    opts := newOptsCall(srcObj.id, "PATCH", "")

    id, _, _ := parseDirID(directoryID)

    move := api.MoveItemRequest{
        Name: replaceReservedChars(leaf),
        ParentReference: &api.ItemReference{
            ID:      id,
            DriveID: dstDriveID,
            ID:      id,
        },
        // We set the mod time too as it gets reset otherwise
        FileSystemInfo: &api.FileSystemInfoFacet{
@@ -1024,7 +1081,20 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
    if err != nil {
        return err
    }
    parsedDstDirID, _, _ := parseDirID(dstDirectoryID)
    parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)

    // Find ID of src
    srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
    if err != nil {
        return err
    }
    _, srcDriveID, _ := parseNormalizedID(srcID)

    if dstDriveID != srcDriveID {
        // https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
        // "Items cannot be moved between Drives using this request."
        return fs.ErrorCantDirMove
    }

    // Check destination does not exist
    if dstRemote != "" {
@@ -1038,14 +1108,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
        }
    }

    // Find ID of src
    srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
    if err != nil {
        return err
    }

    // Get timestamps of src so they can be preserved
    srcInfo, _, err := srcFs.readMetaDataForPath(srcPath)
    srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(srcID, "")
    if err != nil {
        return err
    }
@@ -1055,7 +1119,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
    move := api.MoveItemRequest{
        Name: replaceReservedChars(leaf),
        ParentReference: &api.ItemReference{
            ID:      parsedDstDirID,
            DriveID: dstDriveID,
            ID:      parsedDstDirID,
        },
        // We set the mod time too as it gets reset otherwise
        FileSystemInfo: &api.FileSystemInfoFacet{
@@ -1122,7 +1187,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
    if err != nil {
        return "", err
    }
    opts := newOptsCall(info.ID, "POST", "/createLink")
    opts := newOptsCall(info.GetID(), "POST", "/createLink")

    share := api.CreateShareLinkRequest{
        Type: "view",
@@ -1270,13 +1335,13 @@ func (o *Object) ModTime() time.Time {
// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
    var opts rest.Opts
    _, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
    _, drive, rootURL := parseDirID(directoryID)
    leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
    trueDirID, drive, rootURL := parseNormalizedID(directoryID)
    if drive != "" {
        opts = rest.Opts{
            Method:  "PATCH",
            RootURL: rootURL,
            Path:    "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()),
            Path:    "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf),
        }
    } else {
        opts = rest.Opts{
@@ -1344,7 +1409,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUploadResponse, err error) {
    leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
    id, drive, rootURL := parseDirID(directoryID)
    id, drive, rootURL := parseNormalizedID(directoryID)
    var opts rest.Opts
    if drive != "" {
        opts = rest.Opts{
@@ -1477,13 +1542,13 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
    fs.Debugf(o, "Starting singlepart upload")
    var resp *http.Response
    var opts rest.Opts
    _, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
    _, drive, rootURL := parseDirID(directoryID)
    leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
    trueDirID, drive, rootURL := parseNormalizedID(directoryID)
    if drive != "" {
        opts = rest.Opts{
            Method:        "PUT",
            RootURL:       rootURL,
            Path:          "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
            Path:          "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf) + ":/content",
            ContentLength: &size,
            Body:          in,
        }
@@ -1566,8 +1631,8 @@ func (o *Object) ID() string {
    return o.id
}

func newOptsCall(id string, method string, route string) (opts rest.Opts) {
    id, drive, rootURL := parseDirID(id)
func newOptsCall(normalizedID string, method string, route string) (opts rest.Opts) {
    id, drive, rootURL := parseNormalizedID(normalizedID)

    if drive != "" {
        return rest.Opts{
@@ -1582,7 +1647,10 @@ func newOptsCall(id string, method string, route string) (opts rest.Opts) {
    }
}

func parseDirID(ID string) (string, string, string) {
// parseNormalizedID parses a normalized ID (may be in the form `driveID#itemID` or just `itemID`)
// and returns itemID, driveID, rootURL.
// Such a normalized ID can come from (*Item).GetID()
func parseNormalizedID(ID string) (string, string, string) {
    if strings.Index(ID, "#") >= 0 {
        s := strings.Split(ID, "#")
        return s[1], s[0], graphURL + "/drives"
@@ -1590,6 +1658,21 @@ func parseDirID(ID string) (string, string, string) {
    return ID, "", ""
}

// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
// returns a relative path for `target` based on `base` and a boolean `true`.
// Otherwise returns "", false.
func getRelativePathInsideBase(base, target string) (string, bool) {
    if base == "" {
        return target, true
    }

    baseSlash := base + "/"
    if strings.HasPrefix(target+"/", baseSlash) {
        return target[len(baseSlash):], true
    }
    return "", false
}

// Check the interfaces are satisfied
var (
    _ fs.Fs = (*Fs)(nil)
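Both helpers in this hunk are pure functions, so their behavior can be shown in isolation. A self-contained sketch with local copies of the two; the sample normalized ID is hypothetical, and the hard-coded root URL assumes rclone's Graph endpoint ("https://graph.microsoft.com/v1.0") behind the graphURL + "/drives" expression above:

    package main

    import (
        "fmt"
        "strings"
    )

    // Local copy of parseNormalizedID: "driveID#itemID" or bare "itemID".
    func parseNormalizedID(id string) (string, string, string) {
        if strings.Contains(id, "#") {
            s := strings.Split(id, "#")
            return s[1], s[0], "https://graph.microsoft.com/v1.0/drives"
        }
        return id, "", ""
    }

    // Local copy of getRelativePathInsideBase.
    func getRelativePathInsideBase(base, target string) (string, bool) {
        if base == "" {
            return target, true
        }
        baseSlash := base + "/"
        if strings.HasPrefix(target+"/", baseSlash) {
            return target[len(baseSlash):], true
        }
        return "", false
    }

    func main() {
        // Hypothetical normalized ID as produced by (*Item).GetID().
        itemID, driveID, rootURL := parseNormalizedID("drive123#item456")
        fmt.Println(itemID, driveID, rootURL) // item456 drive123 https://graph.microsoft.com/v1.0/drives

        rel, ok := getRelativePathInsideBase("shared/docs", "shared/docs/report.txt")
        fmt.Println(rel, ok) // report.txt true
        _, ok = getRelativePathInsideBase("shared/docs", "shared/docsX/report.txt")
        fmt.Println(ok) // false: prefix match is on whole path segments
    }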
@@ -179,13 +179,13 @@ func (u *uploader) upload() error {
    // Do one read to determine if we have more than one part
    reader, _, err := u.nextReader()
    if err == io.EOF { // single part
        fs.Debugf(u, "Uploading as single part object to QingStor")
        fs.Debugf(u, "Tried to upload a single object to QingStor")
        return u.singlePartUpload(reader)
    } else if err != nil {
        return errors.Errorf("read upload data failed: %s", err)
    }

    fs.Debugf(u, "Uploading as multi-part object to QingStor")
    fs.Debugf(u, "Tried to upload a multi-part object to QingStor")
    mu := multiUploader{uploader: u}
    return mu.multiPartUpload(reader)
}
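The single read up front is how the uploader picks between the single-part and multipart paths: io.EOF from the first nextReader call means the whole payload fit in one part. A self-contained sketch of that peek-ahead pattern, assuming nextReader behaves like a fixed-size chunk reader (the part size and inputs are illustrative):

    package main

    import (
        "bytes"
        "fmt"
        "io"
    )

    const partSize = 8 // illustrative; real uploaders use multi-megabyte parts

    // nextPart reads up to partSize bytes; io.EOF signals that the input held
    // no more than one part's worth of data, mirroring the decision above.
    func nextPart(r io.Reader) ([]byte, error) {
        buf := make([]byte, partSize)
        n, err := io.ReadFull(r, buf)
        if err == io.ErrUnexpectedEOF {
            return buf[:n], io.EOF // short read: this is the only part
        }
        return buf[:n], err
    }

    func main() {
        part, err := nextPart(bytes.NewReader([]byte("tiny")))
        fmt.Printf("%q single=%v\n", part, err == io.EOF) // "tiny" single=true

        part, err = nextPart(bytes.NewReader([]byte("more than one part")))
        fmt.Printf("%q single=%v\n", part, err == io.EOF) // "more tha" single=false
    }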
@@ -261,7 +261,7 @@ func (mu *multiUploader) initiate() error {
    req := qs.InitiateMultipartUploadInput{
        ContentType: &mu.cfg.mimeType,
    }
    fs.Debugf(mu, "Initiating a multi-part upload")
    fs.Debugf(mu, "Tried to initiate a multi-part upload")
    rsp, err := bucketInit.InitiateMultipartUpload(mu.cfg.key, &req)
    if err == nil {
        mu.uploadID = rsp.UploadID
@@ -279,12 +279,12 @@ func (mu *multiUploader) send(c chunk) error {
        ContentLength: &c.size,
        Body:          c.buffer,
    }
    fs.Debugf(mu, "Uploading a part to QingStor with partNumber %d and partSize %d", c.partNumber, c.size)
    fs.Debugf(mu, "Tried to upload a part to QingStor that partNumber %d and partSize %d", c.partNumber, c.size)
    _, err := bucketInit.UploadMultipart(mu.cfg.key, &req)
    if err != nil {
        return err
    }
    fs.Debugf(mu, "Done uploading part partNumber %d and partSize %d", c.partNumber, c.size)
    fs.Debugf(mu, "Upload part finished that partNumber %d and partSize %d", c.partNumber, c.size)

    mu.mtx.Lock()
    defer mu.mtx.Unlock()
@@ -304,7 +304,7 @@ func (mu *multiUploader) list() error {
    req := qs.ListMultipartInput{
        UploadID: mu.uploadID,
    }
    fs.Debugf(mu, "Reading multi-part details")
    fs.Debugf(mu, "Tried to list a multi-part")
    rsp, err := bucketInit.ListMultipart(mu.cfg.key, &req)
    if err == nil {
        mu.objectParts = rsp.ObjectParts
@@ -331,7 +331,7 @@ func (mu *multiUploader) complete() error {
        ObjectParts: mu.objectParts,
        ETag:        &md5String,
    }
    fs.Debugf(mu, "Completing multi-part object")
    fs.Debugf(mu, "Tried to complete a multi-part")
    _, err = bucketInit.CompleteMultipartUpload(mu.cfg.key, &req)
    if err == nil {
        fs.Debugf(mu, "Complete multi-part finished")
@@ -348,7 +348,7 @@ func (mu *multiUploader) abort() error {
    req := qs.AbortMultipartUploadInput{
        UploadID: uploadID,
    }
    fs.Debugf(mu, "Aborting multi-part object %q", *uploadID)
    fs.Debugf(mu, "Tried to abort a multi-part")
    _, err = bucketInit.AbortMultipartUpload(mu.cfg.key, &req)
}

cmd/cmd.go (10 changes)
@@ -51,7 +51,7 @@ var (
    errorCommandNotFound    = errors.New("command not found")
    errorUncategorized      = errors.New("uncategorized error")
    errorNotEnoughArguments = errors.New("not enough arguments")
    errorTooManyArguments   = errors.New("too many arguments")
    errorTooManyArguents    = errors.New("too many arguments")
)

const (
@@ -294,12 +294,14 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
func CheckArgs(MinArgs, MaxArgs int, cmd *cobra.Command, args []string) {
    if len(args) < MinArgs {
        _ = cmd.Usage()
        _, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum: you provided %d non flag arguments: %q\n", cmd.Name(), MinArgs, len(args), args)
        _, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments minimum\n", cmd.Name(), MinArgs)
        // os.Exit(1)
        resolveExitCode(errorNotEnoughArguments)
    } else if len(args) > MaxArgs {
        _ = cmd.Usage()
        _, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum: you provided %d non flag arguments: %q\n", cmd.Name(), MaxArgs, len(args), args)
        resolveExitCode(errorTooManyArguments)
        _, _ = fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum\n", cmd.Name(), MaxArgs)
        // os.Exit(1)
        resolveExitCode(errorTooManyArguents)
    }
}
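CheckArgs is the guard each rclone subcommand runs before doing any work. A minimal sketch of how a cobra command typically wires such a check up; checkArgs below is a simplified stand-in for the real helper, and the command name and argument counts are illustrative:

    package main

    import (
        "fmt"
        "os"

        "github.com/spf13/cobra"
    )

    // checkArgs is a stand-in for cmd.CheckArgs: enforce an argument range.
    func checkArgs(min, max int, cmd *cobra.Command, args []string) {
        if len(args) < min || len(args) > max {
            _ = cmd.Usage()
            fmt.Fprintf(os.Stderr, "Command %s needs between %d and %d arguments\n", cmd.Name(), min, max)
            os.Exit(1)
        }
    }

    func main() {
        copyCmd := &cobra.Command{
            Use: "copy source:path dest:path",
            Run: func(cmd *cobra.Command, args []string) {
                checkArgs(2, 2, cmd, args) // exactly two positional arguments
                fmt.Println("copying", args[0], "->", args[1])
            },
        }
        root := &cobra.Command{Use: "rclone"}
        root.AddCommand(copyCmd)
        _ = root.Execute()
    }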
@@ -126,7 +126,7 @@ func (s *server) serveDir(w http.ResponseWriter, r *http.Request, dirRemote stri
    }

    // Make the entries for display
    directory := serve.NewDirectory(dirRemote, s.HTMLTemplate)
    directory := serve.NewDirectory(dirRemote)
    for _, node := range dirEntries {
        directory.AddEntry(node.Path(), node.IsDir())
    }
@@ -6,7 +6,6 @@ import (
    "crypto/x509"
    "encoding/base64"
    "fmt"
    "html/template"
    "io/ioutil"
    "log"
    "net"
@@ -15,7 +14,6 @@ import (
    "time"

    auth "github.com/abbot/go-http-auth"
    "github.com/ncw/rclone/cmd/serve/httplib/serve/data"
    "github.com/ncw/rclone/fs"
    "github.com/pkg/errors"
)
@@ -109,9 +107,8 @@ type Server struct {
    waitChan        chan struct{} // for waiting on the listener to close
    httpServer      *http.Server
    basicPassHashed string
    useSSL          bool               // if server is configured for SSL/TLS
    usingAuth       bool               // set if authentication is configured
    HTMLTemplate    *template.Template // HTML template for web interface
    useSSL          bool // if server is configured for SSL/TLS
    usingAuth       bool // set if authentication is configured
}

// singleUserProvider provides the encrypted password for a single user
@@ -208,12 +205,6 @@ func NewServer(handler http.Handler, opt *Options) *Server {
        s.httpServer.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
    }

    htmlTemplate, templateErr := data.GetTemplate()
    if templateErr != nil {
        log.Fatalf(templateErr.Error())
    }
    s.HTMLTemplate = htmlTemplate

    return s
}
@@ -1,22 +0,0 @@
// +build ignore

package main

import (
    "log"
    "net/http"

    "github.com/shurcooL/vfsgen"
)

func main() {
    var AssetDir http.FileSystem = http.Dir("./templates")
    err := vfsgen.Generate(AssetDir, vfsgen.Options{
        PackageName:  "data",
        BuildTags:    "!dev",
        VariableName: "Assets",
    })
    if err != nil {
        log.Fatalln(err)
    }
}
@@ -1,186 +0,0 @@
// Code generated by vfsgen; DO NOT EDIT.

// +build !dev

package data

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "os"
    pathpkg "path"
    "time"
)

// Assets statically implements the virtual filesystem provided to vfsgen.
var Assets = func() http.FileSystem {
    fs := vfsgen۰FS{
        "/": &vfsgen۰DirInfo{
            name:    "/",
            modTime: time.Date(2018, 12, 16, 6, 54, 42, 894445775, time.UTC),
        },
        "/index.html": &vfsgen۰CompressedFileInfo{
            name:             "index.html",
            modTime:          time.Date(2018, 12, 16, 6, 54, 42, 790442328, time.UTC),
            uncompressedSize: 226,

            compressedContent: []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\x8f\x31\xcf\x83\x20\x10\x86\x77\x7e\xc5\x7d\xc4\xf5\x93\xb8\x35\x0d\xb0\xb4\x6e\x26\x6d\x1a\x3b\x74\x3c\xeb\x29\x24\x4a\x13\xa4\x43\x43\xf8\xef\x0d\xea\xd4\x09\xee\x79\xef\x9e\xcb\xc9\xbf\xf3\xe5\xd4\x3e\xae\x35\x98\x30\x4f\x9a\xc9\xfc\xc0\x84\x6e\x54\x9c\x1c\xcf\x80\xb0\xd7\x4c\xce\x14\x10\x9e\x06\xfd\x42\x41\xf1\x77\x18\xfe\x0f\x39\x0d\x36\x4c\xa4\x63\x84\xb2\xcd\x3f\x48\x49\x8a\x8d\x31\x29\xf6\xd1\xee\xd5\x7f\xb2\xa8\xfa\xe9\x33\x95\x66\x31\x82\x47\x37\x12\x14\x16\x8e\x0a\xca\xda\x05\x6f\x69\xc9\x39\x82\xf1\x34\x28\x1e\x23\x14\xb6\xbc\xdf\x1a\x48\x89\xeb\xad\x6a\x08\x87\xd5\x81\x5a\x76\x1e\xc4\x2a\x22\xd7\xaf\x6c\xdf\x27\xb6\x8b\xbe\x01\x00\x00\xff\xff\x92\x2e\x35\x75\xe2\x00\x00\x00"),
        },
    }
    fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{
        fs["/index.html"].(os.FileInfo),
    }

    return fs
}()

type vfsgen۰FS map[string]interface{}

func (fs vfsgen۰FS) Open(path string) (http.File, error) {
    path = pathpkg.Clean("/" + path)
    f, ok := fs[path]
    if !ok {
        return nil, &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist}
    }

    switch f := f.(type) {
    case *vfsgen۰CompressedFileInfo:
        gr, err := gzip.NewReader(bytes.NewReader(f.compressedContent))
        if err != nil {
            // This should never happen because we generate the gzip bytes such that they are always valid.
            panic("unexpected error reading own gzip compressed bytes: " + err.Error())
        }
        return &vfsgen۰CompressedFile{
            vfsgen۰CompressedFileInfo: f,
            gr:                        gr,
        }, nil
    case *vfsgen۰DirInfo:
        return &vfsgen۰Dir{
            vfsgen۰DirInfo: f,
        }, nil
    default:
        // This should never happen because we generate only the above types.
        panic(fmt.Sprintf("unexpected type %T", f))
    }
}

// vfsgen۰CompressedFileInfo is a static definition of a gzip compressed file.
type vfsgen۰CompressedFileInfo struct {
    name              string
    modTime           time.Time
    compressedContent []byte
    uncompressedSize  int64
}

func (f *vfsgen۰CompressedFileInfo) Readdir(count int) ([]os.FileInfo, error) {
    return nil, fmt.Errorf("cannot Readdir from file %s", f.name)
}
func (f *vfsgen۰CompressedFileInfo) Stat() (os.FileInfo, error) { return f, nil }

func (f *vfsgen۰CompressedFileInfo) GzipBytes() []byte {
    return f.compressedContent
}

func (f *vfsgen۰CompressedFileInfo) Name() string       { return f.name }
func (f *vfsgen۰CompressedFileInfo) Size() int64        { return f.uncompressedSize }
func (f *vfsgen۰CompressedFileInfo) Mode() os.FileMode  { return 0444 }
func (f *vfsgen۰CompressedFileInfo) ModTime() time.Time { return f.modTime }
func (f *vfsgen۰CompressedFileInfo) IsDir() bool        { return false }
func (f *vfsgen۰CompressedFileInfo) Sys() interface{}   { return nil }

// vfsgen۰CompressedFile is an opened compressedFile instance.
type vfsgen۰CompressedFile struct {
    *vfsgen۰CompressedFileInfo
    gr *gzip.Reader

    grPos   int64 // Actual gr uncompressed position.
    seekPos int64 // Seek uncompressed position.
}

func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) {
    if f.grPos > f.seekPos {
        // Rewind to beginning.
        err = f.gr.Reset(bytes.NewReader(f.compressedContent))
        if err != nil {
            return 0, err
        }
        f.grPos = 0
    }
    if f.grPos < f.seekPos {
        // Fast-forward.
        _, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos)
        if err != nil {
            return 0, err
        }
        f.grPos = f.seekPos
    }
    n, err = f.gr.Read(p)
    f.grPos += int64(n)
    f.seekPos = f.grPos
    return n, err
}
func (f *vfsgen۰CompressedFile) Seek(offset int64, whence int) (int64, error) {
    switch whence {
    case io.SeekStart:
        f.seekPos = 0 + offset
    case io.SeekCurrent:
        f.seekPos += offset
    case io.SeekEnd:
        f.seekPos = f.uncompressedSize + offset
    default:
        panic(fmt.Errorf("invalid whence value: %v", whence))
    }
    return f.seekPos, nil
}
func (f *vfsgen۰CompressedFile) Close() error {
    return f.gr.Close()
}

// vfsgen۰DirInfo is a static definition of a directory.
type vfsgen۰DirInfo struct {
    name    string
    modTime time.Time
    entries []os.FileInfo
}

func (d *vfsgen۰DirInfo) Read([]byte) (int, error) {
    return 0, fmt.Errorf("cannot Read from directory %s", d.name)
}
func (d *vfsgen۰DirInfo) Close() error               { return nil }
func (d *vfsgen۰DirInfo) Stat() (os.FileInfo, error) { return d, nil }

func (d *vfsgen۰DirInfo) Name() string       { return d.name }
func (d *vfsgen۰DirInfo) Size() int64        { return 0 }
func (d *vfsgen۰DirInfo) Mode() os.FileMode  { return 0755 | os.ModeDir }
func (d *vfsgen۰DirInfo) ModTime() time.Time { return d.modTime }
func (d *vfsgen۰DirInfo) IsDir() bool        { return true }
func (d *vfsgen۰DirInfo) Sys() interface{}   { return nil }

// vfsgen۰Dir is an opened dir instance.
type vfsgen۰Dir struct {
    *vfsgen۰DirInfo
    pos int // Position within entries for Seek and Readdir.
}

func (d *vfsgen۰Dir) Seek(offset int64, whence int) (int64, error) {
    if offset == 0 && whence == io.SeekStart {
        d.pos = 0
        return 0, nil
    }
    return 0, fmt.Errorf("unsupported Seek in directory %s", d.name)
}

func (d *vfsgen۰Dir) Readdir(count int) ([]os.FileInfo, error) {
    if d.pos >= len(d.entries) && count > 0 {
        return nil, io.EOF
    }
    if count <= 0 || count > len(d.entries)-d.pos {
        count = len(d.entries) - d.pos
    }
    e := d.entries[d.pos : d.pos+count]
    d.pos += count
    return e, nil
}
@@ -1,36 +0,0 @@
//go:generate go run assets_generate.go
// The "go:generate" directive compiles static assets by running assets_generate.go

package data

import (
    "html/template"
    "io/ioutil"

    "github.com/ncw/rclone/fs"
    "github.com/pkg/errors"
)

// GetTemplate returns the HTML template for serving directories via HTTP
func GetTemplate() (tpl *template.Template, err error) {
    templateFile, err := Assets.Open("index.html")
    if err != nil {
        return nil, errors.Wrap(err, "get template open")
    }

    defer fs.CheckClose(templateFile, &err)

    templateBytes, err := ioutil.ReadAll(templateFile)
    if err != nil {
        return nil, errors.Wrap(err, "get template read")
    }

    var templateString = string(templateBytes)

    tpl, err = template.New("index").Parse(templateString)
    if err != nil {
        return nil, errors.Wrap(err, "get template parse")
    }

    return
}
@@ -1,11 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ .Title }}</title>
</head>
<body>
<h1>{{ .Title }}</h1>
{{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
{{ end }}</body>
</html>
@@ -21,19 +21,17 @@ type DirEntry struct {

// Directory represents a directory
type Directory struct {
    DirRemote    string
    Title        string
    Entries      []DirEntry
    Query        string
    HTMLTemplate *template.Template
    DirRemote string
    Title     string
    Entries   []DirEntry
    Query     string
}

// NewDirectory makes an empty Directory
func NewDirectory(dirRemote string, htmlTemplate *template.Template) *Directory {
func NewDirectory(dirRemote string) *Directory {
    d := &Directory{
        DirRemote:    dirRemote,
        Title:        fmt.Sprintf("Directory listing of /%s", dirRemote),
        HTMLTemplate: htmlTemplate,
        DirRemote: dirRemote,
        Title:     fmt.Sprintf("Directory listing of /%s", dirRemote),
    }
    return d
}
@@ -79,10 +77,26 @@ func (d *Directory) Serve(w http.ResponseWriter, r *http.Request) {
    defer accounting.Stats.DoneTransferring(d.DirRemote, true)

    fs.Infof(d.DirRemote, "%s: Serving directory", r.RemoteAddr)

    err := d.HTMLTemplate.Execute(w, d)
    err := indexTemplate.Execute(w, d)
    if err != nil {
        Error(d.DirRemote, w, "Failed to render template", err)
        return
    }
}

// indexPage is a directory listing template
var indexPage = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ .Title }}</title>
</head>
<body>
<h1>{{ .Title }}</h1>
{{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
{{ end }}</body>
</html>
`

// indexTemplate is the instantiated indexPage
var indexTemplate = template.Must(template.New("index").Parse(indexPage))
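A self-contained sketch of the embedded-template approach this hunk switches to: parse the listing page once at package init with template.Must, then execute it against a directory value. The types and data below are simplified stand-ins for serve.Directory, not the real ones:

    package main

    import (
        "html/template"
        "os"
    )

    // Simplified stand-ins for serve.DirEntry / serve.Directory.
    type dirEntry struct{ URL, Leaf string }
    type directory struct {
        Title   string
        Entries []dirEntry
    }

    var indexPage = `<h1>{{ .Title }}</h1>
    {{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
    {{ end }}`

    // template.Must panics on a bad template, so parse errors surface at startup.
    var indexTemplate = template.Must(template.New("index").Parse(indexPage))

    func main() {
        d := directory{
            Title:   "Directory listing of /z",
            Entries: []dirEntry{{URL: "file", Leaf: "file"}, {URL: "dir/", Leaf: "dir/"}},
        }
        if err := indexTemplate.Execute(os.Stdout, d); err != nil {
            panic(err)
        }
    }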
@@ -2,32 +2,23 @@ package serve

import (
    "errors"
    "html/template"
    "io/ioutil"
    "net/http"
    "net/http/httptest"
    "net/url"
    "testing"

    "github.com/ncw/rclone/cmd/serve/httplib/serve/data"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func GetTemplate(t *testing.T) *template.Template {
    htmlTemplate, err := data.GetTemplate()
    require.NoError(t, err)
    return htmlTemplate
}

func TestNewDirectory(t *testing.T) {
    d := NewDirectory("z", GetTemplate(t))
    d := NewDirectory("z")
    assert.Equal(t, "z", d.DirRemote)
    assert.Equal(t, "Directory listing of /z", d.Title)
}

func TestSetQuery(t *testing.T) {
    d := NewDirectory("z", GetTemplate(t))
    d := NewDirectory("z")
    assert.Equal(t, "", d.Query)
    d.SetQuery(url.Values{"potato": []string{"42"}})
    assert.Equal(t, "?potato=42", d.Query)
@@ -36,7 +27,7 @@ func TestSetQuery(t *testing.T) {
}

func TestAddEntry(t *testing.T) {
    var d = NewDirectory("z", GetTemplate(t))
    var d = NewDirectory("z")
    d.AddEntry("", true)
    d.AddEntry("dir", true)
    d.AddEntry("a/b/c/d.txt", false)
@@ -51,7 +42,7 @@ func TestAddEntry(t *testing.T) {
    }, d.Entries)

    // Now test with a query parameter
    d = NewDirectory("z", GetTemplate(t)).SetQuery(url.Values{"potato": []string{"42"}})
    d = NewDirectory("z").SetQuery(url.Values{"potato": []string{"42"}})
    d.AddEntry("file", false)
    d.AddEntry("dir", true)
    assert.Equal(t, []DirEntry{
@@ -71,7 +62,7 @@ func TestError(t *testing.T) {
}

func TestServe(t *testing.T) {
    d := NewDirectory("aDirectory", GetTemplate(t))
    d := NewDirectory("aDirectory")
    d.AddEntry("file", false)
    d.AddEntry("dir", true)
@@ -154,7 +154,7 @@ Contributors
  * Michael P. Dubner <pywebmail@list.ru>
  * Antoine GIRARD <sapk@users.noreply.github.com>
  * Mateusz Piotrowski <mpp302@gmail.com>
  * Animosity022 <animosity22@users.noreply.github.com> <earl.texter@gmail.com>
  * Animosity022 <animosity22@users.noreply.github.com>
  * Peter Baumgartner <pete@lincolnloop.com>
  * Craig Rachel <craig@craigrachel.com>
  * Michael G. Noll <miguno@users.noreply.github.com>
@@ -221,7 +221,3 @@ Contributors
  * Mathieu Carbou <mathieu.carbou@gmail.com>
  * Mark Otway <mark@otway.com>
  * William Cocker <37018962+WilliamCocker@users.noreply.github.com>
  * François Leurent <131.js@cloudyks.org>
  * Arkadius Stefanski <arkste@gmail.com>
  * Jay <dev@jaygoel.com>
  * andrea rota <a@xelera.eu>
@@ -98,8 +98,7 @@ excess files in the bucket.
B2 supports multiple [Application Keys for different access permission
to B2 Buckets](https://www.backblaze.com/b2/docs/application_keys.html).

You can use these with rclone too; you will need to use rclone version 1.43
or later.
You can use these with rclone too.

Follow Backblaze's docs to create an Application Key with the required
permission and add the `Application Key ID` as the `account` and the
@@ -267,15 +267,6 @@ Options

Rclone has a number of options to control its behaviour.

Options that take parameters can have the values passed in two ways,
`--option=value` or `--option value`. However boolean (true/false)
options behave slightly differently to the other options in that
`--boolean` sets the option to `true` and the absence of the flag sets
it to `false`. It is also possible to specify `--boolean=false` or
`--boolean=true`. Note that `--boolean false` is not valid - this is
parsed as `--boolean` and the `false` is parsed as an extra command
line argument for rclone.

Options which use TIME use the go time parser. A duration string is a
possibly signed sequence of decimal numbers, each with optional
fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid
@@ -242,17 +242,13 @@ platforms they are common. Rclone will map these names to and from an
identical looking unicode equivalent. For example if a file has a `?`
in it will be mapped to `？` instead.

The largest allowed file sizes are 15GB for OneDrive for Business and 35GB for OneDrive Personal (Updated 4 Jan 2019).

The entire path, including the file name, must contain fewer than 400 characters for OneDrive, OneDrive for Business and SharePoint Online. If you are encrypting file and folder names with rclone, you may want to pay attention to this limitation because the encrypted names are typically longer than the original ones.
The largest allowed file size is 10GiB (10,737,418,240 bytes).

OneDrive seems to be OK with at least 50,000 files in a folder, but at
100,000 rclone will get errors listing the directory like `couldn’t
list files: UnknownError:`. See
[#2707](https://github.com/ncw/rclone/issues/2707) for more info.

An official document about the limitations for different types of OneDrive can be found [here](https://support.office.com/en-us/article/invalid-file-names-and-file-types-in-onedrive-onedrive-for-business-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa).

### Versioning issue ###

Every change in OneDrive causes the service to create a new version.
@@ -264,16 +260,6 @@ The `copy` is the only rclone command affected by this as we copy
the file and then afterwards set the modification time to match the
source file.

**Note**: Starting October 2018, users will no longer be able to disable versioning by default. This is because Microsoft has brought an [update](https://techcommunity.microsoft.com/t5/Microsoft-OneDrive-Blog/New-Updates-to-OneDrive-and-SharePoint-Team-Site-Versioning/ba-p/204390) to the mechanism. To change this new default setting, a PowerShell command is required to be run by a SharePoint admin. If you are an admin, you can run these commands in PowerShell to change that setting:

1. `Install-Module -Name Microsoft.Online.SharePoint.PowerShell` (in case you haven't installed this already)
1. `Import-Module Microsoft.Online.SharePoint.PowerShell -DisableNameChecking`
1. `Connect-SPOService -Url https://YOURSITE-admin.sharepoint.com -Credential YOU@YOURSITE.COM` (replacing `YOURSITE`, `YOU`, `YOURSITE.COM` with the actual values; this will prompt for your credentials)
1. `Set-SPOTenant -EnableMinimumVersionRequirement $False`
1. `Disconnect-SPOService` (to disconnect from the server)

*Below are the steps for normal users to disable versioning. If you don't see the "No Versioning" option, make sure the above requirements are met.*

User [Weropol](https://github.com/Weropol) has found a method to disable
versioning on OneDrive
@@ -127,19 +127,6 @@ does not take any path arguments.
To view your current quota you can use the `rclone about remote:`
command which will display your usage limit (quota) and the current usage.

### Limitations ###

When uploading very large files (bigger than about 5GB) you will need
to increase the `--timeout` parameter. This is because Yandex pauses
(perhaps to calculate the MD5SUM for the entire file) before returning
confirmation that the file has been uploaded. The default handling of
timeouts in rclone is to assume a 5 minute pause is an error and close
the connection - you'll see `net/http: timeout awaiting response
headers` errors in the logs if this is happening. Setting the timeout
to twice the max size of file in GB should be enough, so if you want
to upload a 30GB file set a timeout of `2 * 30 = 60m`, that is
`--timeout 60m`.

<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/yandex/yandex.go then run make backenddocs -->
### Standard Options
@@ -575,17 +575,6 @@ func SetValueAndSave(name, key, value string) (err error) {
    return nil
}

// FileGetFresh reads the config key under section, returning the value or
// an error if the config file was not found or the value couldn't be
// read.
func FileGetFresh(section, key string) (value string, err error) {
    reloadedConfigFile, err := loadConfigFile()
    if err != nil {
        return "", err
    }
    return reloadedConfigFile.GetValue(section, key)
}

// ShowRemotes shows an overview of the config file
func ShowRemotes() {
    remotes := getConfigData().GetSectionList()
@@ -12,7 +12,6 @@ func MimeTypeFromName(remote string) (mimeType string) {
    if !strings.ContainsRune(mimeType, '/') {
        mimeType = "application/octet-stream"
    }
    mimeType = strings.Replace(mimeType, "; charset=utf-8", "", -1) // Remove charset
    return mimeType
}
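The hunk shows only the tail of MimeTypeFromName. A minimal standalone sketch of the same idea, assuming (as the standard library makes natural) that the type is first looked up from the file extension with the mime package:

    package main

    import (
        "fmt"
        "mime"
        "path"
        "strings"
    )

    // mimeTypeFromName guesses a MIME type from the file extension, falling
    // back to application/octet-stream when the lookup yields nothing usable.
    func mimeTypeFromName(remote string) string {
        mimeType := mime.TypeByExtension(path.Ext(remote))
        if !strings.ContainsRune(mimeType, '/') {
            mimeType = "application/octet-stream"
        }
        return mimeType
    }

    func main() {
        fmt.Println(mimeTypeFromName("photo.png")) // image/png
        fmt.Println(mimeTypeFromName("unknown"))   // application/octet-stream
    }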
@@ -1337,14 +1337,6 @@ func RcatSize(fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modT
    accounting.Stats.Transferring(dstFileName)
    body := ioutil.NopCloser(in)                                 // we let the server close the body
    in := accounting.NewAccountSizeName(body, size, dstFileName) // account the transfer (no buffering)

    if fs.Config.DryRun {
        fs.Logf("stdin", "Not uploading as --dry-run")
        // prevents "broken pipe" errors
        _, err = io.Copy(ioutil.Discard, in)
        return nil, err
    }

    var err error
    defer func() {
        closeErr := in.Close()
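The dry-run branch above drains its input instead of returning immediately, so whatever is feeding the pipe never hits a "broken pipe". A small sketch of that drain pattern with sample data:

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "strings"
    )

    func main() {
        in := ioutil.NopCloser(strings.NewReader("data that would have been uploaded"))
        defer in.Close()

        // Consume everything without storing it, like the --dry-run branch:
        // the producer sees its writes accepted instead of a closed pipe.
        n, err := io.Copy(ioutil.Discard, in)
        fmt.Println(n, err) // 34 <nil>
    }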
@@ -211,7 +211,7 @@ func (s *Server) handleOptions(w http.ResponseWriter, r *http.Request, path stri
func (s *Server) serveRoot(w http.ResponseWriter, r *http.Request) {
    remotes := config.FileSections()
    sort.Strings(remotes)
    directory := serve.NewDirectory("", s.HTMLTemplate)
    directory := serve.NewDirectory("")
    directory.Title = "List of all rclone remotes."
    q := url.Values{}
    for _, remote := range remotes {
@@ -235,7 +235,7 @@ func (s *Server) serveRemote(w http.ResponseWriter, r *http.Request, path string
        return
    }
    // Make the entries for display
    directory := serve.NewDirectory(path, s.HTMLTemplate)
    directory := serve.NewDirectory(path)
    for _, entry := range entries {
        _, isDir := entry.(fs.Directory)
        directory.AddEntry(entry.Remote(), isDir)

go.mod (6 changes)
@@ -15,7 +15,7 @@ require (
    github.com/cpuguy83/go-md2man v1.0.8 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/djherbis/times v1.1.0
    github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible
    github.com/dropbox/dropbox-sdk-go-unofficial v5.0.1-0.20181205034806-56e5f6595305+incompatible
    github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 // indirect
    github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1
    github.com/google/go-querystring v1.0.0 // indirect
@@ -29,7 +29,7 @@ require (
    github.com/kr/pretty v0.1.0 // indirect
    github.com/mattn/go-runewidth v0.0.3 // indirect
    github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
    github.com/ncw/swift v1.0.43
    github.com/ncw/swift v1.0.42
    github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed
    github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
    github.com/patrickmn/go-cache v2.1.0+incompatible
@@ -40,8 +40,6 @@ require (
    github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46
    github.com/russross/blackfriday v1.5.2 // indirect
    github.com/sevlyar/go-daemon v0.1.4
    github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 // indirect
    github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd // indirect
    github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c
    github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect
    github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c // indirect

go.sum (8 changes)
@@ -31,8 +31,6 @@ github.com/dropbox/dropbox-sdk-go-unofficial v4.1.0+incompatible/go.mod h1:lr+Lh
github.com/dropbox/dropbox-sdk-go-unofficial v5.0.0+incompatible h1:FQu9Ef2dkC8g2rQmcQmpXXeoRegXHODBfveKKZu6+e8=
github.com/dropbox/dropbox-sdk-go-unofficial v5.0.1-0.20181205034806-56e5f6595305+incompatible h1:4HSS6BiPqvgsn/zrwt6KOYY+mw153zmhvewZIRh1+Ds=
github.com/dropbox/dropbox-sdk-go-unofficial v5.0.1-0.20181205034806-56e5f6595305+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible h1:9jnukMIowLSo3SY7+GTwxmYJv4QC0LxXbo97zHWCyoc=
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 h1:cC0Hbb+18DJ4i6ybqDybvj4wdIDS4vnD0QEci98PgM8=
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9/go.mod h1:GpOj6zuVBG3Inr9qjEnuVTgBlk2lZ1S9DcoFiXWyKss=
github.com/goftp/server v0.0.0-20180914132916-1fd52c8552f1 h1:WjgeEHEDLGx56ndxS6FYi6qFjZGajSVHPuEPdpJ60cI=
@@ -68,8 +66,6 @@ github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zX
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
github.com/ncw/swift v1.0.42 h1:ztvRb6hs52IHOcaYt73f9lXYLIeIuWgdooRDhdyllGI=
github.com/ncw/swift v1.0.42/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/ncw/swift v1.0.43 h1:TZn2l/bPV0CqG+/G5BFh/ROWnyX7dL2D0URaOjNQRsw=
github.com/ncw/swift v1.0.43/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed h1:bAVGG6B+R5qpSylrrA+BAMrzYkdAoiTaKPVxRB+4cyM=
github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
@@ -90,10 +86,6 @@ github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNue
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sevlyar/go-daemon v0.1.4 h1:Ayxp/9SNHwPBjV+kKbnHl2ch6rhxTu08jfkGkoxgULQ=
github.com/sevlyar/go-daemon v0.1.4/go.mod h1:6dJpPatBT9eUwM5VCw9Bt6CdX9Tk6UWvhW3MebLDRKE=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c h1:fyKiXKO1/I/B6Y2U8T7WdQGWzwehOuGIrljPtt7YTTI=
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
@@ -153,30 +153,6 @@ type TokenSource struct {
    expiryTimer *time.Timer // signals whenever the token expires
}

// If token has expired then first try re-reading it from the config
// file in case a concurrently running rclone has updated it already
func (ts *TokenSource) reReadToken() bool {
    tokenString, err := config.FileGetFresh(ts.name, config.ConfigToken)
    if err != nil {
        fs.Debugf(ts.name, "Failed to read token out of config file: %v", err)
        return false
    }
    newToken := new(oauth2.Token)
    err = json.Unmarshal([]byte(tokenString), newToken)
    if err != nil {
        fs.Debugf(ts.name, "Failed to parse token out of config file: %v", err)
        return false
    }
    if !newToken.Valid() {
        fs.Debugf(ts.name, "Loaded invalid token from config file - ignoring")
        return false
    }
    fs.Debugf(ts.name, "Loaded fresh token from config file")
    ts.token = newToken
    ts.tokenSource = nil // invalidate since we changed the token
    return true
}

// Token returns a token or an error.
// Token must be safe for concurrent use by multiple goroutines.
// The returned Token must not be modified.
@@ -185,39 +161,17 @@ func (ts *TokenSource) reReadToken() bool {
|
||||
func (ts *TokenSource) Token() (*oauth2.Token, error) {
|
||||
ts.mu.Lock()
|
||||
defer ts.mu.Unlock()
|
||||
var (
|
||||
token *oauth2.Token
|
||||
err error
|
||||
changed = false
|
||||
)
|
||||
const maxTries = 5
|
||||
|
||||
// Try getting the token a few times
|
||||
for i := 1; i <= maxTries; i++ {
|
||||
// Try reading the token from the config file in case it has
|
||||
// been updated by a concurrent rclone process
|
||||
if !ts.token.Valid() {
|
||||
if ts.reReadToken() {
|
||||
changed = true
|
||||
}
|
||||
}
|
||||
|
||||
// Make a new token source if required
|
||||
if ts.tokenSource == nil {
|
||||
ts.tokenSource = ts.config.TokenSource(ts.ctx, ts.token)
|
||||
}
|
||||
|
||||
token, err = ts.tokenSource.Token()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
fs.Debugf(ts.name, "Token refresh failed try %d/%d: %v", i, maxTries, err)
|
||||
time.Sleep(1 * time.Second)
|
||||
// Make a new token source if required
|
||||
if ts.tokenSource == nil {
|
||||
ts.tokenSource = ts.config.TokenSource(ts.ctx, ts.token)
|
||||
}
|
||||
|
||||
token, err := ts.tokenSource.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
changed = changed || (*token != *ts.token)
|
||||
changed := *token != *ts.token
|
||||
ts.token = token
|
||||
if changed {
|
||||
// Bump on the expiry timer if it is set
|
||||
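The retained loop retries the refresh up to maxTries times with a pause between attempts. The same shape, extracted as a tiny reusable helper (illustrative only, not part of rclone):

    package retry

    import "time"

    // Do calls f up to maxTries times, sleeping pause between failures,
    // and returns the last error if every attempt fails.
    func Do(maxTries int, pause time.Duration, f func() error) error {
        var err error
        for i := 1; i <= maxTries; i++ {
            if err = f(); err == nil {
                return nil
            }
            if i < maxTries {
                time.Sleep(pause)
            }
        }
        return err
    }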
@@ -404,7 +358,7 @@ func doConfig(id, name string, m configmap.Mapper, errorHandler func(*http.Reque
    if err != nil {
        return err
    }
    return PutToken(name, m, token, true)
    return PutToken(name, m, token, false)
}
case TitleBarRedirectURL:
    useWebServer = automatic
33  vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth/sdk.go  generated  vendored
@@ -9,19 +9,6 @@ import (
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
)

// AuthAPIError wraps AuthError
type AuthAPIError struct {
    dropbox.APIError
    AuthError *AuthError `json:"error"`
}

// AccessAPIError wraps AccessError
type AccessAPIError struct {
    dropbox.APIError
    AccessError *AccessError `json:"error"`
}

// RateLimitAPIError wraps RateLimitError
type RateLimitAPIError struct {
    dropbox.APIError
    RateLimitError *RateLimitError `json:"error"`
@@ -29,22 +16,7 @@ type RateLimitAPIError struct {

// HandleCommonAuthErrors handles common authentication errors
func HandleCommonAuthErrors(c dropbox.Config, resp *http.Response, body []byte) error {
    switch resp.StatusCode {
    case http.StatusUnauthorized:
        var apiError AuthAPIError
        if err := json.Unmarshal(body, &apiError); err != nil {
            c.LogDebug("Error unmarshaling '%s' into JSON", body)
            return err
        }
        return apiError
    case http.StatusForbidden:
        var apiError AccessAPIError
        if err := json.Unmarshal(body, &apiError); err != nil {
            c.LogDebug("Error unmarshaling '%s' into JSON", body)
            return err
        }
        return apiError
    case http.StatusTooManyRequests:
    if resp.StatusCode == http.StatusTooManyRequests {
        var apiError RateLimitAPIError
        // Check content-type
        contentType, _, _ := mime.ParseMediaType(resp.Header.Get("content-type"))
@@ -61,7 +33,6 @@ func HandleCommonAuthErrors(c dropbox.Config, resp *http.Response, body []byte)
        apiError.RateLimitError.RetryAfter = uint64(timeout)
    }
    return apiError
    default:
        return nil
    }
    return nil
}
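HandleCommonAuthErrors maps the three auth-level HTTP statuses onto the typed wrappers defined above and returns nil for everything else, so endpoint-specific handling still runs. A stdlib-only sketch of that dispatch pattern (the type and field here are illustrative, not the SDK's):

    package authsketch

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    type apiError struct {
        Summary string `json:"error_summary"`
    }

    func (e apiError) Error() string { return e.Summary }

    // handleCommon returns a typed error for auth-level statuses and nil
    // otherwise, leaving other codes to the caller.
    func handleCommon(resp *http.Response, body []byte) error {
        switch resp.StatusCode {
        case http.StatusUnauthorized, http.StatusForbidden, http.StatusTooManyRequests:
            var e apiError
            if err := json.Unmarshal(body, &e); err != nil {
                return fmt.Errorf("decoding %d response: %w", resp.StatusCode, err)
            }
            return e
        default:
            return nil
        }
    }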
18  vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/client.go  generated  vendored
@@ -549,7 +549,7 @@ func (dbx *apiImpl) CopyV2(arg *RelocationArg) (res *RelocationResult, err error
        headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID
    }

    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "copy_v2", headers, bytes.NewReader(b))
    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "copy", headers, bytes.NewReader(b))
    if err != nil {
        return
    }
@@ -695,7 +695,7 @@ func (dbx *apiImpl) CopyBatchV2(arg *RelocationBatchArgBase) (res *RelocationBat
        headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID
    }

    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "copy_batch_v2", headers, bytes.NewReader(b))
    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "copy_batch", headers, bytes.NewReader(b))
    if err != nil {
        return
    }
@@ -830,7 +830,7 @@ func (dbx *apiImpl) CopyBatchCheckV2(arg *async.PollArg) (res *RelocationBatchV2
        headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID
    }

    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "copy_batch/check_v2", headers, bytes.NewReader(b))
    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "copy_batch/check", headers, bytes.NewReader(b))
    if err != nil {
        return
    }
@@ -1097,7 +1097,7 @@ func (dbx *apiImpl) CreateFolderV2(arg *CreateFolderArg) (res *CreateFolderResul
        headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID
    }

    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "create_folder_v2", headers, bytes.NewReader(b))
    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "create_folder", headers, bytes.NewReader(b))
    if err != nil {
        return
    }
@@ -1364,7 +1364,7 @@ func (dbx *apiImpl) DeleteV2(arg *DeleteArg) (res *DeleteResult, err error) {
        headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID
    }

    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "delete_v2", headers, bytes.NewReader(b))
    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "delete", headers, bytes.NewReader(b))
    if err != nil {
        return
    }
@@ -2515,7 +2515,7 @@ func (dbx *apiImpl) MoveV2(arg *RelocationArg) (res *RelocationResult, err error
        headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID
    }

    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "move_v2", headers, bytes.NewReader(b))
    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "move", headers, bytes.NewReader(b))
    if err != nil {
        return
    }
@@ -2661,7 +2661,7 @@ func (dbx *apiImpl) MoveBatchV2(arg *MoveBatchArg) (res *RelocationBatchV2Launch
        headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID
    }

    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "move_batch_v2", headers, bytes.NewReader(b))
    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "move_batch", headers, bytes.NewReader(b))
    if err != nil {
        return
    }
@@ -2793,7 +2793,7 @@ func (dbx *apiImpl) MoveBatchCheckV2(arg *async.PollArg) (res *RelocationBatchV2
        headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID
    }

    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "move_batch/check_v2", headers, bytes.NewReader(b))
    req, err := (*dropbox.Context)(dbx).NewRequest("api", "rpc", true, "files", "move_batch/check", headers, bytes.NewReader(b))
    if err != nil {
        return
    }
@@ -3698,7 +3698,7 @@ func (dbx *apiImpl) UploadSessionAppendV2(arg *UploadSessionAppendArg, content i
        headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID
    }

    req, err := (*dropbox.Context)(dbx).NewRequest("content", "upload", true, "files", "upload_session/append_v2", headers, content)
    req, err := (*dropbox.Context)(dbx).NewRequest("content", "upload", true, "files", "upload_session/append", headers, content)
    if err != nil {
        return
    }
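Every hunk in this file makes the same substitution: the RPC route string toggles between a "_v2" endpoint and its legacy name while the surrounding request code is untouched. If the choice were made at runtime instead, a hypothetical helper could centralise it:

    package dropboxroutes

    // route is purely illustrative; the SDK hard-codes each endpoint name.
    func route(name string, useV2 bool) string {
        if useV2 {
            return name + "_v2"
        }
        return name
    }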
2  vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sdk.go  generated  vendored
@@ -37,7 +37,7 @@ const (
    hostAPI     = "api"
    hostContent = "content"
    hostNotify  = "notify"
    sdkVersion  = "5.4.0"
    sdkVersion  = "5.2.0"
    specVersion = "097e9ba"
)
2  vendor/github.com/ncw/swift/README.md  generated  vendored
@@ -150,5 +150,3 @@ Contributors
- Arthur Paim Arnold <arthurpaimarnold@gmail.com>
- Bruno Michel <bmichel@menfin.info>
- Charles Hsu <charles0126@gmail.com>
- Omar Ali <omarali@users.noreply.github.com>
- Andreas Andersen <andreas@softwaredesign.se>
21  vendor/github.com/ncw/swift/compatibility_1_6.go  generated  vendored
@@ -1,21 +0,0 @@
// +build go1.6

package swift

import (
    "net/http"
    "time"
)

const IS_AT_LEAST_GO_16 = true

func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {
    tr.ExpectContinueTimeout = t
}

func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {
    req.Header.Add("Expect", "100-continue")
    if !hasContentLength {
        req.TransferEncoding = []string{"chunked"}
    }
}
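This file and its !go1.6 twin below form a build-tag compatibility shim: each file compiles for only one toolchain range, both export the same symbols, and callers never test versions at runtime. A fresh sketch of the same technique, with a hypothetical feature and invented file names:

    // ---- cancel_go15.go: compiled on Go 1.5 and later ----

    // +build go1.5

    package compat

    import "net/http"

    // SetCancel wires up a cancellation channel; http.Request.Cancel
    // exists from Go 1.5 onwards.
    func SetCancel(req *http.Request, c <-chan struct{}) { req.Cancel = c }

    // ---- cancel_old.go: compiled on older toolchains ----
    //
    // // +build !go1.5
    //
    // package compat
    //
    // import "net/http"
    //
    // func SetCancel(req *http.Request, c <-chan struct{}) {} // no-op fallback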
13  vendor/github.com/ncw/swift/compatibility_not_1_6.go  generated  vendored
@@ -1,13 +0,0 @@
// +build !go1.6

package swift

import (
    "net/http"
    "time"
)

const IS_AT_LEAST_GO_16 = false

func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {}
func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {}
13  vendor/github.com/ncw/swift/swift.go  generated  vendored
@@ -423,15 +423,12 @@ func (c *Connection) setDefaults() {
        c.Timeout = 60 * time.Second
    }
    if c.Transport == nil {
        t := &http.Transport{
        c.Transport = &http.Transport{
            // TLSClientConfig: &tls.Config{RootCAs: pool},
            // DisableCompression: true,
            Proxy: http.ProxyFromEnvironment,
            // Half of linux's default open files limit (1024).
            MaxIdleConnsPerHost: 512,
            Proxy:               http.ProxyFromEnvironment,
            MaxIdleConnsPerHost: 2048,
        }
        SetExpectContinueTimeout(t, 5*time.Second)
        c.Transport = t
    }
    if c.client == nil {
        c.client = &http.Client{
@@ -723,10 +720,6 @@ func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response,
    }
    req.Header.Add("User-Agent", c.UserAgent)
    req.Header.Add("X-Auth-Token", authToken)

    _, hasCL := p.Headers["Content-Length"]
    AddExpectAndTransferEncoding(req, hasCL)

    resp, err = c.doTimeoutRequest(timer, req)
    if err != nil {
        if (p.Operation == "HEAD" || p.Operation == "GET") && retries > 0 {
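One side of this hunk builds the Transport in a temporary variable so the build-tag shim can set ExpectContinueTimeout before assignment; the other assigns the composite literal directly and raises the idle-connection cap. A standalone sketch of the temporary-variable arrangement with the shim inlined (package and function names invented):

    package swiftsketch

    import (
        "net/http"
        "time"
    )

    func newTransport() *http.Transport {
        t := &http.Transport{
            Proxy: http.ProxyFromEnvironment,
            // Half of Linux's default open-files limit (1024).
            MaxIdleConnsPerHost: 512,
        }
        t.ExpectContinueTimeout = 5 * time.Second // what the shim did on go1.6+
        return t
    }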
4  vendor/modules.txt  vendored
@@ -63,7 +63,7 @@ github.com/cpuguy83/go-md2man/md2man
github.com/davecgh/go-spew/spew
# github.com/djherbis/times v1.1.0
github.com/djherbis/times
# github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible
# github.com/dropbox/dropbox-sdk-go-unofficial v5.0.1-0.20181205034806-56e5f6595305+incompatible
github.com/dropbox/dropbox-sdk-go-unofficial/dropbox
github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth
github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common
@@ -97,7 +97,7 @@ github.com/kr/fs
github.com/mattn/go-runewidth
# github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
github.com/ncw/go-acd
# github.com/ncw/swift v1.0.43
# github.com/ncw/swift v1.0.42
github.com/ncw/swift
# github.com/nsf/termbox-go v0.0.0-20181027232701-60ab7e3d12ed
github.com/nsf/termbox-go
14  vfs/file.go
@@ -331,10 +331,9 @@ func (f *File) setSize(n int64) {
// Update the object when written and add it to the directory
func (f *File) setObject(o fs.Object) {
    f.mu.Lock()
    defer f.mu.Unlock()
    f.o = o
    _ = f.applyPendingModTime()
    f.mu.Unlock()

    f.d.addObject(f)
}

@@ -441,23 +440,20 @@ func (f *File) Sync() error {

// Remove the file
func (f *File) Remove() error {
    f.mu.Lock()
    defer f.mu.Unlock()
    f.muRW.Lock()
    defer f.muRW.Unlock()
    if f.d.vfs.Opt.ReadOnly {
        return EROFS
    }
    f.muRW.Lock() // muRW must be locked before mu to avoid
    f.mu.Lock()   // deadlock in RWFileHandle.openPending and .close
    if f.o != nil {
        err := f.o.Remove()
        if err != nil {
            fs.Errorf(f, "File.Remove file error: %v", err)
            f.mu.Unlock()
            f.muRW.Unlock()
            return err
        }
    }
    f.mu.Unlock()
    f.muRW.Unlock()

    // Remove the item from the directory listing
    f.d.delObject(f.Name())
    // Remove the object from the cache
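The rewritten Remove replaces the deferred unlocks with explicit ones so both locks can be released mid-function, and the new comment states the ordering rule: muRW before mu. A self-contained sketch of why a single consistent order prevents deadlock (types invented for illustration):

    package locks

    import "sync"

    type file struct {
        muRW sync.Mutex // guards open handles; always taken first
        mu   sync.Mutex // guards metadata; always taken second
    }

    // remove takes both locks in the agreed order. If another method took
    // mu first and then muRW, two goroutines could each hold one lock and
    // wait forever on the other, which is the deadlock the hunk above avoids.
    func (f *file) remove() {
        f.muRW.Lock()
        f.mu.Lock()
        // ... mutate state ...
        f.mu.Unlock()
        f.muRW.Unlock()
    }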
@@ -1,330 +0,0 @@
// Test the VFS to exhaustion, specifically looking for deadlocks
//
// Run on a mounted filesystem
package main

import (
    "flag"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "math"
    "math/rand"
    "os"
    "path"
    "sync"
    "sync/atomic"
    "time"
)

var (
    nameLength = flag.Int("name-length", 10, "Length of names to create")
    verbose    = flag.Bool("v", false, "Set to show more info")
    number     = flag.Int("n", 4, "Number of tests to run simultaneously")
    iterations = flag.Int("i", 100, "Iterations of the test")
    timeout    = flag.Duration("timeout", 10*time.Second, "Inactivity time to detect a deadlock")
    testNumber int32
)

// Seed the random number generator
func init() {
    rand.Seed(time.Now().UnixNano())
}

// RandomString creates a random string for test purposes
func RandomString(n int) string {
    const (
        vowel     = "aeiou"
        consonant = "bcdfghjklmnpqrstvwxyz"
        digit     = "0123456789"
    )
    pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
    out := make([]byte, n)
    p := 0
    for i := range out {
        source := pattern[p]
        p = (p + 1) % len(pattern)
        out[i] = source[rand.Intn(len(source))]
    }
    return string(out)
}
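The pattern slice alternates consonants and vowels with a digit in every eighth slot, so generated names stay pronounceable and easy to spot in interleaved log output. A hypothetical call:

    name := RandomString(10) // e.g. "bakecid3fu": letters alternate, the eighth character is a digit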
// Test contains stats about the running test which work for files or
// directories
type Test struct {
    dir     string
    name    string
    created bool
    handle  *os.File
    tests   []func()
    isDir   bool
    number  int32
    prefix  string
    timer   *time.Timer
}

// NewTest creates a new test and fills in the Tests
func NewTest(Dir string) *Test {
    t := &Test{
        dir:    Dir,
        name:   RandomString(*nameLength),
        isDir:  rand.Intn(2) == 0,
        number: atomic.AddInt32(&testNumber, 1),
        timer:  time.NewTimer(*timeout),
    }
    width := int(math.Floor(math.Log10(float64(*number)))) + 1
    t.prefix = fmt.Sprintf("%*d: %s: ", width, t.number, t.path())
    if t.isDir {
        t.tests = []func(){
            t.list,
            t.rename,
            t.mkdir,
            t.rmdir,
        }
    } else {
        t.tests = []func(){
            t.list,
            t.rename,
            t.open,
            t.close,
            t.remove,
            t.read,
            t.write,
        }
    }
    return t
}

// kick the deadlock timeout
func (t *Test) kick() {
    if !t.timer.Stop() {
        <-t.timer.C
    }
    t.timer.Reset(*timeout)
}
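kick uses the classic Stop-then-drain idiom before Reset. Note that the bare drain can block if the timer already fired and another goroutine consumed the tick; this program tolerates that because the only other receiver is the final select in RandomTests. A non-blocking variant of the idiom, as a standalone sketch (function name invented):

    package timers

    import "time"

    // resetTimer reuses tm safely even when another goroutine may have
    // consumed the pending tick first.
    func resetTimer(tm *time.Timer, d time.Duration) {
        if !tm.Stop() {
            select {
            case <-tm.C: // drain a pending tick if present
            default: // already consumed elsewhere
            }
        }
        tm.Reset(d)
    }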
// randomTest runs a random test
func (t *Test) randomTest() {
    t.kick()
    i := rand.Intn(len(t.tests))
    t.tests[i]()
}

// logf logs things - not shown unless -v
func (t *Test) logf(format string, a ...interface{}) {
    if *verbose {
        log.Printf(t.prefix+format, a...)
    }
}

// errorf logs errors
func (t *Test) errorf(format string, a ...interface{}) {
    log.Printf(t.prefix+"ERROR: "+format, a...)
}

// list test
func (t *Test) list() {
    t.logf("list")
    fis, err := ioutil.ReadDir(t.dir)
    if err != nil {
        t.errorf("%s: failed to read directory: %v", t.dir, err)
        return
    }
    if t.created && len(fis) == 0 {
        t.errorf("%s: expecting entries in directory, got none", t.dir)
        return
    }
    found := false
    for _, fi := range fis {
        if fi.Name() == t.name {
            found = true
        }
    }
    if t.created {
        if !found {
            t.errorf("%s: expecting to find %q in directory, got none", t.dir, t.name)
            return
        }
    } else {
        if found {
            t.errorf("%s: not expecting to find %q in directory, but found it", t.dir, t.name)
            return
        }
    }
}

// path returns the current path to the item
func (t *Test) path() string {
    return path.Join(t.dir, t.name)
}

// rename test
func (t *Test) rename() {
    if !t.created {
        return
    }
    t.logf("rename")
    NewName := RandomString(*nameLength)
    newPath := path.Join(t.dir, NewName)
    err := os.Rename(t.path(), newPath)
    if err != nil {
        t.errorf("failed to rename to %q: %v", newPath, err)
        return
    }
    t.name = NewName
}

// close test
func (t *Test) close() {
    if t.handle == nil {
        return
    }
    t.logf("close")
    err := t.handle.Close()
    t.handle = nil
    if err != nil {
        t.errorf("failed to close: %v", err)
        return
    }
}

// open test
func (t *Test) open() {
    t.close()
    t.logf("open")
    handle, err := os.OpenFile(t.path(), os.O_RDWR|os.O_CREATE, 0666)
    if err != nil {
        t.errorf("failed to open: %v", err)
        return
    }
    t.handle = handle
    t.created = true
}

// read test
func (t *Test) read() {
    if t.handle == nil {
        return
    }
    t.logf("read")
    bytes := make([]byte, 10)
    _, err := t.handle.Read(bytes)
    if err != nil && err != io.EOF {
        t.errorf("failed to read: %v", err)
        return
    }
}

// write test
func (t *Test) write() {
    if t.handle == nil {
        return
    }
    t.logf("write")
    bytes := make([]byte, 10)
    _, err := t.handle.Write(bytes)
    if err != nil {
        t.errorf("failed to write: %v", err)
        return
    }
}

// remove test
func (t *Test) remove() {
    if !t.created {
        return
    }
    t.logf("remove")
    err := os.Remove(t.path())
    if err != nil {
        t.errorf("failed to remove: %v", err)
        return
    }
    t.created = false
}

// mkdir test
func (t *Test) mkdir() {
    if t.created {
        return
    }
    t.logf("mkdir")
    err := os.Mkdir(t.path(), 0777)
    if err != nil {
        t.errorf("failed to mkdir %q", t.path())
        return
    }
    t.created = true
}

// rmdir test
func (t *Test) rmdir() {
    if !t.created {
        return
    }
    t.logf("rmdir")
    err := os.Remove(t.path())
    if err != nil {
        t.errorf("failed to rmdir %q", t.path())
        return
    }
    t.created = false
}

// Tidy removes any stray files and stops the deadlock timer
func (t *Test) Tidy() {
    t.timer.Stop()
    if !t.isDir {
        t.close()
        t.remove()
    } else {
        t.rmdir()
    }
    t.logf("finished")
}

// RandomTests runs random tests with deadlock detection
func (t *Test) RandomTests(iterations int, quit chan struct{}) {
    var finished = make(chan struct{})
    go func() {
        for i := 0; i < iterations; i++ {
            t.randomTest()
        }
        close(finished)
    }()
    select {
    case <-finished:
    case <-quit:
        quit <- struct{}{}
    case <-t.timer.C:
        t.errorf("deadlock detected")
        quit <- struct{}{}
    }
}
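When a worker times out or observes quit, it sends back into quit before returning, so the single stop token fans out to every other worker's select. A condensed, runnable sketch of that re-broadcast pattern (names invented):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        const workers = 4
        quit := make(chan struct{}, workers) // buffered: re-sends never block
        var wg sync.WaitGroup
        for i := 0; i < workers; i++ {
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                <-quit             // wait for the shared stop signal
                quit <- struct{}{} // pass it on so every worker sees it
                fmt.Println("worker", i, "stopping")
            }(i)
        }
        quit <- struct{}{} // one token stops them all
        wg.Wait()
    }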
func main() {
    flag.Parse()
    args := flag.Args()
    if len(args) != 1 {
        log.Fatalf("%s: Syntax [opts] <directory>", os.Args[0])
    }
    dir := args[0]
    _ = os.MkdirAll(dir, 0777)

    var (
        wg   sync.WaitGroup
        quit = make(chan struct{}, *iterations)
    )
    for i := 0; i < *number; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            t := NewTest(dir)
            defer t.Tidy()
            t.RandomTests(*iterations, quit)
        }()
    }
    wg.Wait()
}
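A typical invocation of this deleted stress tester, using the flags defined at the top of the file (file name and mount point are illustrative):

    go run test_vfs.go -n 8 -i 200 -timeout 30s -v /mnt/rclone-mount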