mirror of
https://github.com/rclone/rclone.git
synced 2026-01-22 12:23:15 +00:00
Compare commits
1 commit
fix-metada
...
encoder-nf

| Author | SHA1 | Date |
|---|---|---|
| | 24835344eb | |

.github/workflows/build.yml (vendored): 2 changes
@@ -283,7 +283,7 @@ jobs:
run: govulncheck ./...

- name: Scan edits of autogenerated files
run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
run: bin/check_autogenerated_edits.py
if: github.event_name == 'pull_request'

android:

@@ -571,6 +571,8 @@ Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.

[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)

[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)

## Keeping a backend or command out of tree

Rclone was designed to be modular so it is very easy to keep a backend

@@ -74,14 +74,13 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
* OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/)
* OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)

@@ -117,22 +117,16 @@ func init() {
} else {
oauthConfig.Scopes = scopesReadWrite
}
return oauthutil.ConfigOut("warning1", &oauthutil.Options{
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
case "warning1":
case "warning":
// Warn the user as required by google photos integration
return fs.ConfigConfirm("warning2", true, "config_warning", `Warning
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning

IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)

case "warning2":
// Warn the user that rclone can no longer download photos it didn't upload from google photos
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
IMPORTANT: Due to Google policy changes rclone can now only download photos it uploaded.`)

case "warning_done":
return nil, nil
}

@@ -979,24 +979,6 @@ func (f *Fs) deleteObjects(ctx context.Context, IDs []string, useTrash bool) (er
return nil
}

// untrash a file or directory by ID
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the restored item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
func (f *Fs) untrashObjects(ctx context.Context, IDs []string) (err error) {
if len(IDs) == 0 {
return nil
}
req := api.RequestBatch{
IDs: IDs,
}
if err := f.requestBatchAction(ctx, "batchUntrash", &req); err != nil {
return fmt.Errorf("untrash object failed: %w", err)
}
return nil
}

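The numbered-suffix behaviour described in the comment above is the key assumption behind the collision handling removed in this commit. As a rough illustration of such a scheme (a self-contained sketch, not rclone or PikPak code), picking the smallest free "(n)" suffix looks like this:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// nextFreeName returns name unchanged if it is free, otherwise it inserts
// "(n)" before the extension, picking the smallest n not already taken,
// mirroring foo.txt -> foo(1).txt -> foo(2).txt.
func nextFreeName(name string, taken map[string]bool) string {
	if !taken[name] {
		return name
	}
	ext := path.Ext(name)
	base := strings.TrimSuffix(name, ext)
	for n := 1; ; n++ {
		candidate := fmt.Sprintf("%s(%d)%s", base, n, ext)
		if !taken[candidate] {
			return candidate
		}
	}
}

func main() {
	taken := map[string]bool{"foo.txt": true, "foo(1).txt": true}
	fmt.Println(nextFreeName("foo.txt", taken)) // foo(2).txt
}
```
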
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
@@ -1081,14 +1063,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return f.waitTask(ctx, info.TaskID)
}

// Move the object to a new parent folder
//
// Objects cannot be moved to their current folder.
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the moved item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
// Move the object
func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err error) {
if len(IDs) == 0 {
return nil
@@ -1104,12 +1079,6 @@ func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err e
}

// renames the object
//
// The new name must be different from the current name.
// "file_rename_to_same_name" (3): Name of file or folder is not changed
//
// Within the same folder, object names must be unique.
// "file_duplicated_name" (3): File name cannot be repeated
func (f *Fs) renameObject(ctx context.Context, ID, newName string) (info *api.File, err error) {
req := api.File{
Name: f.opt.Enc.FromStandardName(newName),
@@ -1194,13 +1163,18 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
err = srcObj.readMetaData(ctx)
err := srcObj.readMetaData(ctx)
if err != nil {
return nil, err
}

srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
}
@@ -1211,74 +1185,31 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
return nil, err
}

if srcObj.parent != dstParentID {
// Perform the move. A numbered copy might be generated upon name collision.
if srcParentID != dstParentID {
// Do the move
if err = f.moveObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
return nil, fmt.Errorf("move: failed to move object %s to new parent %s: %w", srcObj.id, dstParentID, err)
return nil, err
}
defer func() {
if err != nil {
// FIXME: Restored file might have a numbered name if a conflict occurs
if mvErr := f.moveObjects(ctx, []string{srcObj.id}, srcObj.parent); mvErr != nil {
fs.Logf(f, "move: couldn't restore original object %q to %q after move failure: %v", dstObj.id, src.Remote(), mvErr)
}
}
}()
}
// Manually update info of moved object to save API calls
dstObj.id = srcObj.id
dstObj.mimeType = srcObj.mimeType
dstObj.gcid = srcObj.gcid
dstObj.md5sum = srcObj.md5sum
dstObj.hasMetaData = true

// Find the moved object and any conflict object with the same name.
var moved, conflict *api.File
_, err = f.listAll(ctx, dstParentID, api.KindOfFile, "false", func(item *api.File) bool {
if item.ID == srcObj.id {
moved = item
if item.Name == dstLeaf {
return true
}
} else if item.Name == dstLeaf {
conflict = item
if srcLeaf != dstLeaf {
// Rename
info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
if err != nil {
return nil, fmt.Errorf("move: couldn't rename moved file: %w", err)
}
// Stop early if both found
return moved != nil && conflict != nil
})
if err != nil {
return nil, fmt.Errorf("move: couldn't locate moved file %q in destination directory %q: %w", srcObj.id, dstParentID, err)
return dstObj, dstObj.setMetaData(info)
}
if moved == nil {
return nil, fmt.Errorf("move: moved file %q not found in destination", srcObj.id)
}

// If moved object already has the correct name, return
if moved.Name == dstLeaf {
return dstObj, dstObj.setMetaData(moved)
}
// If name collision, delete conflicting file first
if conflict != nil {
if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
return nil, fmt.Errorf("move: couldn't delete conflicting file: %w", err)
}
defer func() {
if err != nil {
if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
fs.Logf(f, "move: couldn't restore conflicting file: %v", restoreErr)
}
}
}()
}
info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
if err != nil {
return nil, fmt.Errorf("move: couldn't rename moved file %q to %q: %w", dstObj.id, dstLeaf, err)
}
return dstObj, dstObj.setMetaData(info)
return dstObj, nil
}

// copy objects
//
// Objects cannot be copied to their current folder.
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the copied item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err error) {
if len(IDs) == 0 {
return nil
@@ -1302,13 +1233,13 @@ func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err e
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err = srcObj.readMetaData(ctx)
err := srcObj.readMetaData(ctx)
if err != nil {
return nil, err
}
@@ -1323,55 +1254,31 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
fs.Debugf(src, "Can't copy - same parent")
return nil, fs.ErrorCantCopy
}

// Check for possible conflicts: Pikpak creates numbered copies on name collision.
var conflict *api.File
_, srcLeaf := dircache.SplitPath(srcObj.remote)
if srcLeaf == dstLeaf {
if conflict, err = f.readMetaDataForPath(ctx, remote); err == nil {
// delete conflicting file
if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
return nil, fmt.Errorf("copy: couldn't delete conflicting file: %w", err)
}
defer func() {
if err != nil {
if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
fs.Logf(f, "copy: couldn't restore conflicting file: %v", restoreErr)
}
}
}()
} else if err != fs.ErrorObjectNotFound {
return nil, err
}
} else {
dstDir, _ := dircache.SplitPath(remote)
dstObj.remote = path.Join(dstDir, srcLeaf)
if conflict, err = f.readMetaDataForPath(ctx, dstObj.remote); err == nil {
tmpName := conflict.Name + "-rclone-copy-" + random.String(8)
if _, err = f.renameObject(ctx, conflict.ID, tmpName); err != nil {
return nil, fmt.Errorf("copy: couldn't rename conflicting file: %w", err)
}
defer func() {
if _, renameErr := f.renameObject(ctx, conflict.ID, conflict.Name); renameErr != nil {
fs.Logf(f, "copy: couldn't rename conflicting file back to original: %v", renameErr)
}
}()
} else if err != fs.ErrorObjectNotFound {
return nil, err
}
}

// Copy the object
if err := f.copyObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
err = dstObj.readMetaData(ctx)
if err != nil {
// Update info of the copied object with new parent but source name
if info, err := dstObj.fs.readMetaDataForPath(ctx, srcObj.remote); err != nil {
return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
} else if err = dstObj.setMetaData(info); err != nil {
return nil, err
}

// Can't copy and change name in one step so we have to check if we have
// the correct name after copy
srcLeaf, _, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
}

if srcLeaf != dstLeaf {
return f.Move(ctx, dstObj, remote)
// Rename
info, err := f.renameObject(ctx, dstObj.id, dstLeaf)
if err != nil {
return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err)
}
return dstObj, dstObj.setMetaData(info)
}
return dstObj, nil
}

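For context on the conflict handling in the Copy hunk above, here is a minimal sketch of the general rename-aside pattern it relies on: move the existing destination entry to a temporary name, do the work, and rename it back afterwards. This sketch uses plain filesystem renames rather than the PikPak API, and the ".aside" suffix is illustrative only:

```go
package main

import (
	"fmt"
	"os"
)

// withConflictAside renames path out of the way, runs op, then restores the
// original entry, logging (rather than failing) if the restore itself fails.
func withConflictAside(path string, op func() error) error {
	tmp := path + ".aside"
	if err := os.Rename(path, tmp); err != nil {
		return err
	}
	defer func() {
		if err := os.Rename(tmp, path); err != nil {
			fmt.Fprintln(os.Stderr, "couldn't restore aside file:", err)
		}
	}()
	return op()
}

func main() {
	_ = os.WriteFile("conflict.txt", []byte("old"), 0o600)
	err := withConflictAside("conflict.txt", func() error {
		// The real code performs the server-side copy here.
		return os.WriteFile("conflict.txt.new", []byte("copied"), 0o600)
	})
	fmt.Println("err:", err)
}
```
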
backend/s3/s3.go: 171 changes
@@ -149,9 +149,6 @@ var providerOption = fs.Option{
}, {
Value: "Outscale",
Help: "OUTSCALE Object Storage (OOS)",
}, {
Value: "OVHcloud",
Help: "OVHcloud Object Storage",
}, {
Value: "Petabox",
Help: "Petabox Object Storage",
@@ -538,59 +535,6 @@ func init() {
Value: "ap-northeast-1",
Help: "Tokyo, Japan",
}},
}, {
// References:
// https://help.ovhcloud.com/csm/en-public-cloud-storage-s3-location?id=kb_article_view&sysparm_article=KB0047384
// https://support.us.ovhcloud.com/hc/en-us/articles/10667991081107-Endpoints-and-Object-Storage-Geoavailability
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
Provider: "OVHcloud",
Examples: []fs.OptionExample{{
Value: "gra",
Help: "Gravelines, France",
}, {
Value: "rbx",
Help: "Roubaix, France",
}, {
Value: "sbg",
Help: "Strasbourg, France",
}, {
Value: "eu-west-par",
Help: "Paris, France (3AZ)",
}, {
Value: "de",
Help: "Frankfurt, Germany",
}, {
Value: "uk",
Help: "London, United Kingdom",
}, {
Value: "waw",
Help: "Warsaw, Poland",
}, {
Value: "bhs",
Help: "Beauharnois, Canada",
}, {
Value: "ca-east-tor",
Help: "Toronto, Canada",
}, {
Value: "sgp",
Help: "Singapore",
}, {
Value: "ap-southeast-syd",
Help: "Sydney, Australia",
}, {
Value: "ap-south-mum",
Help: "Mumbai, India",
}, {
Value: "us-east-va",
Help: "Vint Hill, Virginia, USA",
}, {
Value: "us-west-or",
Help: "Hillsboro, Oregon, USA",
}, {
Value: "rbx-archive",
Help: "Roubaix, France (Cold Archive)",
}},
}, {
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
@@ -643,7 +587,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -1230,71 +1174,6 @@ func init() {
Value: "obs.ru-northwest-2.myhuaweicloud.com",
Help: "RU-Moscow2",
}},
}, {
Name: "endpoint",
Help: "Endpoint for OVHcloud Object Storage.",
Provider: "OVHcloud",
Examples: []fs.OptionExample{{
Value: "s3.gra.io.cloud.ovh.net",
Help: "OVHcloud Gravelines, France",
Provider: "OVHcloud",
}, {
Value: "s3.rbx.io.cloud.ovh.net",
Help: "OVHcloud Roubaix, France",
Provider: "OVHcloud",
}, {
Value: "s3.sbg.io.cloud.ovh.net",
Help: "OVHcloud Strasbourg, France",
Provider: "OVHcloud",
}, {
Value: "s3.eu-west-par.io.cloud.ovh.net",
Help: "OVHcloud Paris, France (3AZ)",
Provider: "OVHcloud",
}, {
Value: "s3.de.io.cloud.ovh.net",
Help: "OVHcloud Frankfurt, Germany",
Provider: "OVHcloud",
}, {
Value: "s3.uk.io.cloud.ovh.net",
Help: "OVHcloud London, United Kingdom",
Provider: "OVHcloud",
}, {
Value: "s3.waw.io.cloud.ovh.net",
Help: "OVHcloud Warsaw, Poland",
Provider: "OVHcloud",
}, {
Value: "s3.bhs.io.cloud.ovh.net",
Help: "OVHcloud Beauharnois, Canada",
Provider: "OVHcloud",
}, {
Value: "s3.ca-east-tor.io.cloud.ovh.net",
Help: "OVHcloud Toronto, Canada",
Provider: "OVHcloud",
}, {
Value: "s3.sgp.io.cloud.ovh.net",
Help: "OVHcloud Singapore",
Provider: "OVHcloud",
}, {
Value: "s3.ap-southeast-syd.io.cloud.ovh.net",
Help: "OVHcloud Sydney, Australia",
Provider: "OVHcloud",
}, {
Value: "s3.ap-south-mum.io.cloud.ovh.net",
Help: "OVHcloud Mumbai, India",
Provider: "OVHcloud",
}, {
Value: "s3.us-east-va.io.cloud.ovh.us",
Help: "OVHcloud Vint Hill, Virginia, USA",
Provider: "OVHcloud",
}, {
Value: "s3.us-west-or.io.cloud.ovh.us",
Help: "OVHcloud Hillsboro, Oregon, USA",
Provider: "OVHcloud",
}, {
Value: "s3.rbx-archive.io.cloud.ovh.net",
Help: "OVHcloud Roubaix, France (Cold Archive)",
Provider: "OVHcloud",
}},
}, {
Name: "endpoint",
Help: "Endpoint for Scaleway Object Storage.",
@@ -1532,7 +1411,7 @@ func init() {
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,OVHcloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -2067,7 +1946,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -3710,8 +3589,6 @@ func setQuirks(opt *Options) {
useAlreadyExists = false // untested
case "Outscale":
virtualHostStyle = false
case "OVHcloud":
// No quirks
case "RackCorp":
// No quirks
useMultipartEtag = false // untested
@@ -4581,7 +4458,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
}
foundItems += len(resp.Contents)
for i, object := range resp.Contents {
remote := *stringClone(deref(object.Key))
remote := deref(object.Key)
if urlEncodeListings {
remote, err = url.QueryUnescape(remote)
if err != nil {
@@ -5184,11 +5061,8 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
MultipartUpload: &types.CompletedMultipartUpload{
Parts: parts,
},
RequestPayer: req.RequestPayer,
SSECustomerAlgorithm: req.SSECustomerAlgorithm,
SSECustomerKey: req.SSECustomerKey,
SSECustomerKeyMD5: req.SSECustomerKeyMD5,
UploadId: uid,
RequestPayer: req.RequestPayer,
UploadId: uid,
})
return f.shouldRetry(ctx, err)
})
@@ -6037,7 +5911,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
func s3MetadataToMap(s3Meta map[string]string) map[string]string {
meta := make(map[string]string, len(s3Meta))
for k, v := range s3Meta {
meta[strings.ToLower(k)] = *stringClone(v)
meta[strings.ToLower(k)] = v
}
return meta
}
@@ -6080,14 +5954,14 @@ func (o *Object) setMetaData(resp *s3.HeadObjectOutput) {
o.lastModified = *resp.LastModified
}
}
o.mimeType = strings.Clone(deref(resp.ContentType))
o.mimeType = deref(resp.ContentType)

// Set system metadata
o.storageClass = stringClone(string(resp.StorageClass))
o.cacheControl = stringClonePointer(resp.CacheControl)
o.contentDisposition = stringClonePointer(resp.ContentDisposition)
o.contentEncoding = stringClonePointer(resp.ContentEncoding)
o.contentLanguage = stringClonePointer(resp.ContentLanguage)
o.storageClass = (*string)(&resp.StorageClass)
o.cacheControl = resp.CacheControl
o.contentDisposition = resp.ContentDisposition
o.contentEncoding = resp.ContentEncoding
o.contentLanguage = resp.ContentLanguage

// If decompressing then size and md5sum are unknown
if o.fs.opt.Decompress && deref(o.contentEncoding) == "gzip" {
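
These hunks drop rclone's stringClone/stringClonePointer helpers in favour of plain assignment. For background, here is a small sketch of the problem such helpers (like the stdlib strings.Clone they resemble) address: a Go substring shares its parent's backing array, so cloning a short value can release a much larger response buffer to the garbage collector. Whether that retention actually matters here is the trade-off this change makes:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	large := strings.Repeat("x", 1<<20) + "etag"
	shared := large[len(large)-4:] // keeps the whole 1 MiB backing array alive
	owned := strings.Clone(shared) // copies just these 4 bytes
	fmt.Println(shared == owned)   // true: same contents, independent storage
}
```
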
@@ -6572,11 +6446,8 @@ func (w *s3ChunkWriter) Close(ctx context.Context) (err error) {
MultipartUpload: &types.CompletedMultipartUpload{
Parts: w.completedParts,
},
RequestPayer: w.multiPartUploadInput.RequestPayer,
SSECustomerAlgorithm: w.multiPartUploadInput.SSECustomerAlgorithm,
SSECustomerKey: w.multiPartUploadInput.SSECustomerKey,
SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5,
UploadId: w.uploadID,
RequestPayer: w.multiPartUploadInput.RequestPayer,
UploadId: w.uploadID,
})
return w.f.shouldRetry(ctx, err)
})
@@ -6605,8 +6476,8 @@ func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.R
}

var s3cw *s3ChunkWriter = chunkWriter.(*s3ChunkWriter)
gotETag = *stringClone(s3cw.eTag)
versionID = stringClone(s3cw.versionID)
gotETag = s3cw.eTag
versionID = aws.String(s3cw.versionID)

hashOfHashes := md5.Sum(s3cw.md5s)
wantETag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(s3cw.completedParts))
@@ -6638,8 +6509,8 @@ func (o *Object) uploadSinglepartPutObject(ctx context.Context, req *s3.PutObjec
}
lastModified = time.Now()
if resp != nil {
etag = *stringClone(deref(resp.ETag))
versionID = stringClonePointer(resp.VersionId)
etag = deref(resp.ETag)
versionID = resp.VersionId
}
return etag, lastModified, versionID, nil
}
@@ -6691,8 +6562,8 @@ func (o *Object) uploadSinglepartPresignedRequest(ctx context.Context, req *s3.P
if date, err := http.ParseTime(resp.Header.Get("Date")); err != nil {
lastModified = date
}
etag = *stringClone(resp.Header.Get("Etag"))
vID := *stringClone(resp.Header.Get("x-amz-version-id"))
etag = resp.Header.Get("Etag")
vID := resp.Header.Get("x-amz-version-id")
if vID != "" {
versionID = &vID
}

@@ -38,7 +38,7 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {

d := &smb2.Dialer{}
if f.opt.UseKerberos {
cl, err := NewKerberosFactory().GetClient(f.opt.KerberosCCache)
cl, err := createKerberosClient(f.opt.KerberosCCache)
if err != nil {
return nil, err
}

@@ -7,95 +7,17 @@ import (
"path/filepath"
"strings"
"sync"
"time"

"github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
)

// KerberosFactory encapsulates dependencies and caches for Kerberos clients.
type KerberosFactory struct {
// clientCache caches Kerberos clients keyed by resolved ccache path.
// Clients are reused unless the associated ccache file changes.
clientCache sync.Map // map[string]*client.Client
var (
kerberosClient sync.Map // map[string]*client.Client
kerberosErr sync.Map // map[string]error
)

// errCache caches errors encountered when loading Kerberos clients.
// Prevents repeated attempts for paths that previously failed.
errCache sync.Map // map[string]error

// modTimeCache tracks the last known modification time of ccache files.
// Used to detect changes and trigger credential refresh.
modTimeCache sync.Map // map[string]time.Time

loadCCache func(string) (*credentials.CCache, error)
newClient func(*credentials.CCache, *config.Config, ...func(*client.Settings)) (*client.Client, error)
loadConfig func() (*config.Config, error)
}

// NewKerberosFactory creates a new instance of KerberosFactory with default dependencies.
func NewKerberosFactory() *KerberosFactory {
return &KerberosFactory{
loadCCache: credentials.LoadCCache,
newClient: client.NewFromCCache,
loadConfig: defaultLoadKerberosConfig,
}
}

// GetClient returns a cached Kerberos client or creates a new one if needed.
func (kf *KerberosFactory) GetClient(ccachePath string) (*client.Client, error) {
resolvedPath, err := resolveCcachePath(ccachePath)
if err != nil {
return nil, err
}

stat, err := os.Stat(resolvedPath)
if err != nil {
kf.errCache.Store(resolvedPath, err)
return nil, err
}
mtime := stat.ModTime()

if oldMod, ok := kf.modTimeCache.Load(resolvedPath); ok {
if oldTime, ok := oldMod.(time.Time); ok && oldTime.Equal(mtime) {
if errVal, ok := kf.errCache.Load(resolvedPath); ok {
return nil, errVal.(error)
}
if clientVal, ok := kf.clientCache.Load(resolvedPath); ok {
return clientVal.(*client.Client), nil
}
}
}

// Load Kerberos config
cfg, err := kf.loadConfig()
if err != nil {
kf.errCache.Store(resolvedPath, err)
return nil, err
}

// Load ccache
ccache, err := kf.loadCCache(resolvedPath)
if err != nil {
kf.errCache.Store(resolvedPath, err)
return nil, err
}

// Create new client
cl, err := kf.newClient(ccache, cfg)
if err != nil {
kf.errCache.Store(resolvedPath, err)
return nil, err
}

// Cache and return
kf.clientCache.Store(resolvedPath, cl)
kf.errCache.Delete(resolvedPath)
kf.modTimeCache.Store(resolvedPath, mtime)
return cl, nil
}

// resolveCcachePath resolves the KRB5 ccache path.
func resolveCcachePath(ccachePath string) (string, error) {
if ccachePath == "" {
ccachePath = os.Getenv("KRB5CCNAME")
@@ -128,11 +50,45 @@ func resolveCcachePath(ccachePath string) (string, error) {
}
}

// defaultLoadKerberosConfig loads Kerberos config from default or env path.
func defaultLoadKerberosConfig() (*config.Config, error) {
func loadKerberosConfig() (*config.Config, error) {
cfgPath := os.Getenv("KRB5_CONFIG")
if cfgPath == "" {
cfgPath = "/etc/krb5.conf"
}
return config.Load(cfgPath)
}

// createKerberosClient creates a new Kerberos client.
func createKerberosClient(ccachePath string) (*client.Client, error) {
ccachePath, err := resolveCcachePath(ccachePath)
if err != nil {
return nil, err
}

// check if we already have a client or an error for this ccache path
if errVal, ok := kerberosErr.Load(ccachePath); ok {
return nil, errVal.(error)
}
if clientVal, ok := kerberosClient.Load(ccachePath); ok {
return clientVal.(*client.Client), nil
}

// create a new client if not found in the map
cfg, err := loadKerberosConfig()
if err != nil {
kerberosErr.Store(ccachePath, err)
return nil, err
}
ccache, err := credentials.LoadCCache(ccachePath)
if err != nil {
kerberosErr.Store(ccachePath, err)
return nil, err
}
cl, err := client.NewFromCCache(ccache, cfg)
if err != nil {
kerberosErr.Store(ccachePath, err)
return nil, err
}
kerberosClient.Store(ccachePath, cl)
return cl, nil
}

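The replacement createKerberosClient memoizes both successes and failures in package-level sync.Maps, but, unlike the removed KerberosFactory, it never invalidates entries when the ccache file changes. A minimal generic sketch of that memoization pattern (illustrative only, not Kerberos-specific):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var (
	resultCache sync.Map // map[string]string
	errCache    sync.Map // map[string]error
)

// getCached returns a memoized result or error for key, calling load at
// most once per key. Nothing is ever evicted, matching the new behaviour.
func getCached(key string, load func(string) (string, error)) (string, error) {
	if e, ok := errCache.Load(key); ok {
		return "", e.(error)
	}
	if v, ok := resultCache.Load(key); ok {
		return v.(string), nil
	}
	v, err := load(key)
	if err != nil {
		errCache.Store(key, err)
		return "", err
	}
	resultCache.Store(key, v)
	return v, nil
}

func main() {
	calls := 0
	load := func(k string) (string, error) {
		calls++
		if k == "bad" {
			return "", errors.New("boom")
		}
		return "client-for-" + k, nil
	}
	getCached("a", load)
	getCached("a", load) // served from cache
	fmt.Println(calls)   // 1
}
```
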
@@ -4,11 +4,7 @@ import (
"os"
"path/filepath"
"testing"
"time"

"github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
"github.com/stretchr/testify/assert"
)

@@ -81,62 +77,3 @@ func TestResolveCcachePath(t *testing.T) {
})
}
}

func TestKerberosFactory_GetClient_ReloadOnCcacheChange(t *testing.T) {
// Create temp ccache file
tmpFile, err := os.CreateTemp("", "krb5cc_test")
assert.NoError(t, err)
defer func() {
if err := os.Remove(tmpFile.Name()); err != nil {
t.Logf("Failed to remove temp file %s: %v", tmpFile.Name(), err)
}
}()

unixPath := filepath.ToSlash(tmpFile.Name())
ccachePath := "FILE:" + unixPath

initialContent := []byte("CCACHE_VERSION 4\n")
_, err = tmpFile.Write(initialContent)
assert.NoError(t, err)
assert.NoError(t, tmpFile.Close())

// Setup mocks
loadCallCount := 0
mockLoadCCache := func(path string) (*credentials.CCache, error) {
loadCallCount++
return &credentials.CCache{}, nil
}

mockNewClient := func(cc *credentials.CCache, cfg *config.Config, opts ...func(*client.Settings)) (*client.Client, error) {
return &client.Client{}, nil
}

mockLoadConfig := func() (*config.Config, error) {
return &config.Config{}, nil
}
factory := &KerberosFactory{
loadCCache: mockLoadCCache,
newClient: mockNewClient,
loadConfig: mockLoadConfig,
}

// First call — triggers loading
_, err = factory.GetClient(ccachePath)
assert.NoError(t, err)
assert.Equal(t, 1, loadCallCount, "expected 1 load call")

// Second call — should reuse cache, no additional load
_, err = factory.GetClient(ccachePath)
assert.NoError(t, err)
assert.Equal(t, 1, loadCallCount, "expected cached reuse, no new load")

// Simulate file update
time.Sleep(1 * time.Second) // ensure mtime changes
err = os.WriteFile(tmpFile.Name(), []byte("CCACHE_VERSION 4\n#updated"), 0600)
assert.NoError(t, err)

// Third call — should detect change, reload
_, err = factory.GetClient(ccachePath)
assert.NoError(t, err)
assert.Equal(t, 2, loadCallCount, "expected reload on changed ccache")
}

@@ -4,12 +4,12 @@ This script checks for unauthorized modifications in autogenerated sections of m
It is designed to be used in a GitHub Actions workflow or a local pre-commit hook.

Features:
- Detects markdown files changed between a commit and one of its ancestors. Default is to
check the last commit only. When triggered on a pull request it should typically compare the
pull request branch head and its merge base - the commit on the main branch before it diverged.
- Detects markdown files changed in the last commit.
- Identifies modified autogenerated sections marked by specific comments.
- Reports violations using GitHub Actions error messages.
- Exits with a nonzero status code if unauthorized changes are found.

It currently only checks the last commit.
"""

import re
@@ -22,18 +22,18 @@ def run_git(args):
"""
return subprocess.run(["git"] + args, stdout=subprocess.PIPE, text=True, check=True).stdout.strip()

def get_changed_files(base, head):
def get_changed_files():
"""
Retrieve a list of markdown files that were changed between the base and head commits.
Retrieve a list of markdown files that were changed in the last commit.
"""
files = run_git(["diff", "--name-only", f"{base}...{head}"]).splitlines()
files = run_git(["diff", "--name-only", "HEAD~1", "HEAD"]).splitlines()
return [f for f in files if f.endswith(".md")]

def get_diff(file, base, head):
def get_diff(file):
"""
Get the diff of a given file between the base and head commits.
Get the diff of a given file between the last commit and the current version.
"""
return run_git(["diff", "-U0", f"{base}...{head}", "--", file]).splitlines()
return run_git(["diff", "-U0", "HEAD~1", "HEAD", "--", file]).splitlines()

def get_file_content(ref, file):
"""
@@ -70,7 +70,7 @@ def show_error(file_name, line, message):
"""
print(f"::error file={file_name},line={line}::{message} at {file_name} line {line}")

def check_file(file, base, head):
def check_file(file):
"""
Check a markdown file for modifications in autogenerated regions.
"""
@@ -84,7 +84,7 @@ def check_file(file, base, head):

# Entire autogenerated file check.
if any("autogenerated - DO NOT EDIT" in l for l in new_lines[:10]):
if get_diff(file, base, head):
if get_diff(file):
show_error(file, 1, "Autogenerated file modified")
return True
return False
@@ -92,7 +92,7 @@ def check_file(file, base, head):
# Partial autogenerated regions.
regions_new = find_regions(new_lines)
regions_old = find_regions(old_lines)
diff = get_diff(file, base, head)
diff = get_diff(file)
hunk_re = re.compile(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@")
new_ln = old_ln = None

@@ -124,15 +124,9 @@ def main():
"""
Main function that iterates over changed files and checks them for violations.
"""
base = "HEAD~1"
head = "HEAD"
if len(sys.argv) > 1:
base = sys.argv[1]
if len(sys.argv) > 2:
head = sys.argv[2]
found = False
for f in get_changed_files(base, head):
if check_file(f, base, head):
for f in get_changed_files():
if check_file(f):
found = True
if found:
sys.exit(1)

@@ -316,10 +316,10 @@ See the [VFS File Caching](#vfs-file-caching) section for more info.
When using NFS mount on macOS, if you don't specify |--vfs-cache-mode|
the mount point will be read-only.

Bucket-based remotes - Azure Blob, Swift, S3, Google Cloud Storage and B2 -
can't store empty directories. Of these, only Azure Blob, Google Cloud Storage
and S3 can preserve them when you add `--xxx-directory_markers`; otherwise,
empty directories will vanish once they drop out of the directory cache.
The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2)
do not support the concept of empty directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.

When `rclone mount` is invoked on Unix with `--daemon` flag, the main rclone
program will wait for the background mount to become ready or until the timeout

@@ -5,8 +5,6 @@ import (
"context"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -37,11 +35,6 @@ implement this command directly, in which case ` + "`--checkers`" + ` will be ig
cmd.CheckArgs(1, 1, command, args)
fdst := cmd.NewFsDir(args)
cmd.Run(true, false, command, func() error {
ctx := context.Background()
fi := filter.GetConfig(ctx)
if !fi.InActive() {
fs.Fatalf(nil, "filters are not supported with purge (purge will delete everything unconditionally)")
}
return operations.Purge(context.Background(), fdst, "")
})
},

@@ -158,14 +158,13 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Microsoft OneDrive" home="https://onedrive.live.com/" config="/onedrive/" >}}
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="Nextcloud" home="https://nextcloud.com/" config="/webdav/#nextcloud" >}}
{{< provider name="OVH" home="https://www.ovh.co.uk/public-cloud/storage/object-storage/" config="/swift/" >}}
{{< provider name="Blomp Cloud Storage" home="https://rclone.org/swift/" config="/swift/" >}}
{{< provider name="OpenDrive" home="https://www.opendrive.com/" config="/opendrive/" >}}
{{< provider name="OpenStack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}}
{{< provider name="Oracle Cloud Storage Swift" home="https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html" config="/swift/" >}}
{{< provider name="Oracle Object Storage" home="https://www.oracle.com/cloud/storage/object-storage" config="/oracleobjectstorage/" >}}
{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}}
{{< provider name="OVHcloud Object Storage (Swift)" home="https://www.ovhcloud.com/en/public-cloud/object-storage/" config="/swift/" >}}
{{< provider name="OVHcloud Object Storage (S3-compatible)" home="https://www.ovhcloud.com/en/public-cloud/object-storage/" config="/s3/#ovhcloud" >}}
{{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}}
{{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}

@@ -422,7 +422,7 @@ put them back in again.` >}}
* Dov Murik <dov.murik@gmail.com>
* Ameer Dawood <ameer1234567890@gmail.com>
* Dan Hipschman <dan.hipschman@opendoor.com>
* Josh Soref <jsoref@users.noreply.github.com> <2119212+jsoref@users.noreply.github.com>
* Josh Soref <jsoref@users.noreply.github.com>
* David <david@staron.nl>
* Ingo <ingo@hoffmann.cx>
* Adam Plánský <adamplansky@users.noreply.github.com> <adamplansky@gmail.com>
@@ -637,7 +637,7 @@ put them back in again.` >}}
* anonion <aman207@users.noreply.github.com>
* Ryan Morey <4590343+rmorey@users.noreply.github.com>
* Simon Bos <simonbos9@gmail.com>
* YFdyh000 <yfdyh000@gmail.com>
* YFdyh000 <yfdyh000@gmail.com> * Josh Soref <2119212+jsoref@users.noreply.github.com>
* Øyvind Heddeland Instefjord <instefjord@outlook.com>
* Dmitry Deniskin <110819396+ddeniskin@users.noreply.github.com>
* Alexander Knorr <106825+opexxx@users.noreply.github.com>
@@ -991,4 +991,3 @@ put them back in again.` >}}
* Ross Smith II <ross@smithii.com>
* Vikas Bhansali <64532198+vibhansa-msft@users.noreply.github.com>
* Sudipto Baral <sudiptobaral.me@gmail.com>
* Sam Pegg <samrpegg@gmail.com>

@@ -377,6 +377,8 @@ will show you the defaults for the backends.
| Exclamation | `!` | `!` |
| Hash | `#` | `#` |
| InvalidUtf8 | An invalid UTF-8 character (e.g. latin1) | `<60>` |
| ForceNFC | All invalid NFC characters | Their valid NFC equivalents |
| ForceNFD | All invalid NFD characters | Their valid NFD equivalents |
| LeftCrLfHtVt | CR 0x0D, LF 0x0A, HT 0x09, VT 0x0B on the left of a string | `␍`, `␊`, `␉`, `␋` |
| LeftPeriod | `.` on the left of a string | `.` |
| LeftSpace | SPACE on the left of a string | `␠` |

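The new ForceNFC and ForceNFD rows correspond to Unicode normalization rather than character replacement. As a small sketch using the same golang.org/x/text package the encoder imports below, NFC composes a base letter plus combining mark into a single code point, while NFD splits it apart:

```go
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	decomposed := "u\u0308ber" // "u" + combining diaeresis
	composed := norm.NFC.String(decomposed)
	fmt.Printf("%x\n", composed)                // c3bc626572: "ü" is now one code point
	fmt.Printf("%x\n", norm.NFD.String("über")) // 75cc88626572: split back apart
}
```
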
@@ -29,7 +29,6 @@ The S3 backend can be used with a number of different providers:
{{< provider name="MEGA S4 Object Storage" home="https://mega.io/objectstorage" config="/s3/#mega" >}}
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}}
{{< provider name="OVHcloud" home="https://www.ovhcloud.com/en/public-cloud/object-storage/" config="/s3/#ovhcloud" >}}
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
{{< provider name="Pure Storage FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}}
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
@@ -3612,206 +3611,6 @@ d) Delete this remote
y/e/d> y
```

### OVHcloud {#ovhcloud}

[OVHcloud Object Storage](https://www.ovhcloud.com/en-ie/public-cloud/object-storage/)
is an S3-compatible general-purpose object storage platform available in all OVHcloud regions.
To use the platform, you will need an access key and secret key. To know more about it and how
to interact with the platform, take a look at the [documentation](https://ovh.to/8stqhuo).

Here is an example of making an OVHcloud Object Storage configuration with `rclone config`:

```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n

Enter name for new remote.
name> ovhcloud-rbx

Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[...]
XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Outscale, OVHcloud, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others
\ (s3)
[...]
Storage> s3

Option provider.
Choose your S3 provider.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
[...]
XX / OVHcloud Object Storage
\ (OVHcloud)
[...]
provider> OVHcloud

Option env_auth.
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Choose a number from below, or type in your own boolean value (true or false).
Press Enter for the default (false).
1 / Enter AWS credentials in the next step.
\ (false)
2 / Get AWS credentials from the environment (env vars or IAM).
\ (true)
env_auth> 1

Option access_key_id.
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
access_key_id> my_access

Option secret_access_key.
AWS Secret Access Key (password).
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
secret_access_key> my_secret

Option region.
Region where your bucket will be created and your data stored.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
1 / Gravelines, France
\ (gra)
2 / Roubaix, France
\ (rbx)
3 / Strasbourg, France
\ (sbg)
4 / Paris, France (3AZ)
\ (eu-west-par)
5 / Frankfurt, Germany
\ (de)
6 / London, United Kingdom
\ (uk)
7 / Warsaw, Poland
\ (waw)
8 / Beauharnois, Canada
\ (bhs)
9 / Toronto, Canada
\ (ca-east-tor)
10 / Singapore
\ (sgp)
11 / Sydney, Australia
\ (ap-southeast-syd)
12 / Mumbai, India
\ (ap-south-mum)
13 / Vint Hill, Virginia, USA
\ (us-east-va)
14 / Hillsboro, Oregon, USA
\ (us-west-or)
15 / Roubaix, France (Cold Archive)
\ (rbx-archive)
region> 2

Option endpoint.
Endpoint for OVHcloud Object Storage.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
1 / OVHcloud Gravelines, France
\ (s3.gra.io.cloud.ovh.net)
2 / OVHcloud Roubaix, France
\ (s3.rbx.io.cloud.ovh.net)
3 / OVHcloud Strasbourg, France
\ (s3.sbg.io.cloud.ovh.net)
4 / OVHcloud Paris, France (3AZ)
\ (s3.eu-west-par.io.cloud.ovh.net)
5 / OVHcloud Frankfurt, Germany
\ (s3.de.io.cloud.ovh.net)
6 / OVHcloud London, United Kingdom
\ (s3.uk.io.cloud.ovh.net)
7 / OVHcloud Warsaw, Poland
\ (s3.waw.io.cloud.ovh.net)
8 / OVHcloud Beauharnois, Canada
\ (s3.bhs.io.cloud.ovh.net)
9 / OVHcloud Toronto, Canada
\ (s3.ca-east-tor.io.cloud.ovh.net)
10 / OVHcloud Singapore
\ (s3.sgp.io.cloud.ovh.net)
11 / OVHcloud Sydney, Australia
\ (s3.ap-southeast-syd.io.cloud.ovh.net)
12 / OVHcloud Mumbai, India
\ (s3.ap-south-mum.io.cloud.ovh.net)
13 / OVHcloud Vint Hill, Virginia, USA
\ (s3.us-east-va.io.cloud.ovh.us)
14 / OVHcloud Hillsboro, Oregon, USA
\ (s3.us-west-or.io.cloud.ovh.us)
15 / OVHcloud Roubaix, France (Cold Archive)
\ (s3.rbx-archive.io.cloud.ovh.net)
endpoint> 2

Option acl.
Canned ACL used when creating buckets and storing or copying objects.
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
/ Owner gets FULL_CONTROL.
1 | No one else has access rights (default).
\ (private)
/ Owner gets FULL_CONTROL.
2 | The AllUsers group gets READ access.
\ (public-read)
/ Owner gets FULL_CONTROL.
3 | The AllUsers group gets READ and WRITE access.
| Granting this on a bucket is generally not recommended.
\ (public-read-write)
/ Owner gets FULL_CONTROL.
4 | The AuthenticatedUsers group gets READ access.
\ (authenticated-read)
/ Object owner gets FULL_CONTROL.
5 | Bucket owner gets READ access.
| If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
\ (bucket-owner-read)
/ Both the object owner and the bucket owner get FULL_CONTROL over the object.
6 | If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
\ (bucket-owner-full-control)
acl> 1

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: s3
- provider: OVHcloud
- access_key_id: my_access
- secret_access_key: my_secret
- region: rbx
- endpoint: s3.rbx.io.cloud.ovh.net
- acl: private
Keep this "ovhcloud-rbx" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

Your configuration file should now look like this:

```
[ovhcloud-rbx]
type = s3
provider = OVHcloud
access_key_id = my_access
secret_access_key = my_secret
region = rbx
endpoint = s3.rbx.io.cloud.ovh.net
acl = private
```

### Qiniu Cloud Object Storage (Kodo) {#qiniu}

[Qiniu Cloud Object Storage (Kodo)](https://www.qiniu.com/en/products/kodo) is built on Qiniu's independently developed core technology, proven by extensive customer experience, and holds a leading position in its market. Kodo can be widely applied to mass data management.

@@ -56,7 +56,6 @@ off donation.

Thank you very much to our sponsors:

{{< sponsor src="/img/logos/backblaze.svg" width="300" height="200" title="Visit our sponsor Backblaze" link="https://www.backblaze.com/cloud-storage-rclonead?utm_source=rclone&utm_medium=paid&utm_campaign=rclone-website-20250715">}}
{{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}}
{{< sponsor src="/img/logos/filescom-enterprise-grade-workflows.png" width="300" height="200" title="Start Your Free Trial Today" link="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone">}}
{{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}

@@ -11,7 +11,7 @@ Commercial implementations of that being:

* [Rackspace Cloud Files](https://www.rackspace.com/cloud/files/)
* [Memset Memstore](https://www.memset.com/cloud/storage/)
* [OVH Object Storage](https://www.ovhcloud.com/en/public-cloud/object-storage/)
* [OVH Object Storage](https://www.ovh.co.uk/public-cloud/storage/object-storage/)
* [Oracle Cloud Storage](https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html)
* [Blomp Cloud Storage](https://www.blomp.com/cloud-storage/)
* [IBM Bluemix Cloud ObjectStorage Swift](https://console.bluemix.net/docs/infrastructure/objectstorage-swift/index.html)

@@ -6,12 +6,6 @@
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="{{ .Description }}">
<meta name="author" content="Nick Craig-Wood">
<meta property="og:site_name" content="Rclone" />
<meta property="og:type" content="website" />
<meta property="og:image" content="/img/rclone-1200x630.png">
<meta property="og:url" content="{{ .Permalink }}" />
<meta property="og:title" content="{{ .Title }}" />
<meta property="og:description" content="{{ .Description }}" />
<link rel="shortcut icon" type="image/png" href="/img/rclone-32x32.png"/>
<script defer data-domain="rclone.org" src="https://weblog.rclone.org/js/script.js"></script>
<title>{{ block "title" . }}{{ .Title }}{{ end }}</title>

@@ -10,15 +10,6 @@
</div>
{{end}}

<div class="card">
<div class="card-header" style="padding: 5px 15px;">
Platinum Sponsor
</div>
<div class="card-body">
<a href="https://www.backblaze.com/cloud-storage-rclonead?utm_source=rclone&utm_medium=paid&utm_campaign=rclone-website-20250715" target="_blank" rel="noopener" title="Visit rclone's sponsor Backblaze"><img src="/img/logos/backblaze.svg"></a><br />
</div>
</div>

<div class="card">
<div class="card-header" style="padding: 5px 15px;">
Gold Sponsor

BIN docs/static/img/rclone-1200x630.png (vendored)
Binary file not shown. Before: 36 KiB
@@ -391,7 +391,6 @@ func (s *StatsInfo) _stopAverageLoop() {
if s.average.started {
s.average.cancel()
s.average.stopped.Wait()
s.average.started = false
}
}

@@ -67,44 +67,11 @@ func (m *Metadata) MergeOptions(options []OpenOption) {
//
// If the object has no metadata then metadata will be nil
func GetMetadata(ctx context.Context, o DirEntry) (metadata Metadata, err error) {
if do, ok := o.(Metadataer); ok {
metadata, err = do.Metadata(ctx)
if err != nil {
return nil, err
}
do, ok := o.(Metadataer)
if !ok {
return nil, nil
}
if f, ok := o.Fs().(Fs); ok {
Debugf(o, "GetMetadata")
features := f.Features()
if _, isDir := o.(Directory); isDir {
// if bucket-based remote listing the root mark directories as buckets
isBucket := features.BucketBased && o.Remote() == "" && f.Root() == ""
if isBucket {
if metadata == nil {
metadata = make(Metadata, 1)
}
metadata["content-type"] = "inode/bucket"
}
} else if obj, isObj := o.(Object); isObj && !features.SlowHash {
// If have hashes and they are not slow then add them here
hashes := f.Hashes()
if hashes.Count() > 0 {
if metadata == nil {
metadata = make(Metadata, hashes.Count())
}
for _, hashType := range hashes.Array() {
hash, err := obj.Hash(ctx, hashType)
if err != nil {
Errorf(obj, "failed to read hash: %v", err)
} else if hash != "" {
metadata["hash:"+hashType.String()] = hash
}
}
}

}
}
return metadata, err
return do.Metadata(ctx)
}

// mapItem describes the item to be mapped

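The simplified GetMetadata keeps only the type-assertion core of the old function. A self-contained sketch of that pattern, with a hypothetical object type standing in for a backend's:

```go
package main

import (
	"context"
	"fmt"
)

type Metadata map[string]string

// Metadataer is implemented by entries that can report metadata.
type Metadataer interface {
	Metadata(ctx context.Context) (Metadata, error)
}

type object struct{ meta Metadata }

func (o object) Metadata(ctx context.Context) (Metadata, error) { return o.meta, nil }

// getMetadata probes for metadata support with a type assertion and
// returns nil, nil when the entry doesn't provide any.
func getMetadata(ctx context.Context, o any) (Metadata, error) {
	do, ok := o.(Metadataer)
	if !ok {
		return nil, nil
	}
	return do.Metadata(ctx)
}

func main() {
	m, _ := getMetadata(context.Background(), object{meta: Metadata{"mtime": "2020-01-01"}})
	fmt.Println(m)
	m, _ = getMetadata(context.Background(), "plain string") // not a Metadataer
	fmt.Println(m == nil)                                    // true
}
```
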
@@ -820,7 +820,7 @@ func rcCheck(ctx context.Context, in rc.Params) (out rc.Params, err error) {
return nil, rc.NewErrParamInvalid(errors.New("need srcFs parameter when not using checkFileHash"))
}

oneway, _ := in.GetBool("oneWay")
oneway, _ := in.GetBool("oneway")
download, _ := in.GetBool("download")

opt := &CheckOpt{

@@ -49,7 +49,7 @@ Parameters:

Note that these are the global options which are unaffected by use of
the _config and _filter parameters. If you wish to read the parameters
set in _config or _filter use options/local.
set in _config then use options/config and for _filter use options/filter.

This shows the internal names of the options within rclone which should
map to the external options very easily with a few exceptions.
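A sketch of reading these endpoints in-process via librclone (assuming the `options/config` path the corrected text refers to; error handling elided):

```go
package example

import (
	"fmt"

	"github.com/rclone/rclone/librclone/librclone"
)

// showOptions is a hypothetical illustration: options/local reflects
// the plain global defaults, while (per the corrected text above)
// values set through _config are read back via options/config.
func showOptions() {
	librclone.Initialize()
	defer librclone.Finalize()

	out, status := librclone.RPC("options/local", "{}")
	fmt.Println("options/local:", status, out)

	out, status = librclone.RPC("options/config", "{}")
	fmt.Println("options/config:", status, out)
}
```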
@@ -67,13 +67,13 @@ backends:
#     maxfile: 10k
#     ignore:
#       - TestApplyTransforms
#  - backend: "chunker"
#    remote: "TestChunkerChunk50bYandex:"
#    fastlist: true
#    maxfile: 1k
#    ignore:
#      # Needs investigation
#      - TestDeduplicateNewestByHash
  - backend: "chunker"
    remote: "TestChunkerChunk50bYandex:"
    fastlist: true
    maxfile: 1k
    ignore:
      # Needs investigation
      - TestDeduplicateNewestByHash
#  - backend: "chunker"
#    remote: "TestChunkerChunk50bBox:"
#    fastlist: true
@@ -18,6 +18,8 @@ import (
	"strconv"
	"strings"
	"unicode/utf8"

	"golang.org/x/text/unicode/norm"
)

const (
@@ -61,6 +63,8 @@ const (
	EncodeRightPeriod   // Trailing .
	EncodeRightCrLfHtVt // Trailing CR LF HT VT
	EncodeInvalidUtf8   // Invalid UTF-8 bytes
	EncodeInvalidNFC    // Force NFC encoding
	EncodeInvalidNFD    // Force NFD encoding
	EncodeDot           // . and .. names
	EncodeSquareBracket // []
	EncodeSemicolon     // ;
@@ -148,6 +152,8 @@ func init() {
	alias("RightPeriod", EncodeRightPeriod)
	alias("RightCrLfHtVt", EncodeRightCrLfHtVt)
	alias("InvalidUtf8", EncodeInvalidUtf8)
	alias("ForceNFC", EncodeInvalidNFC)
	alias("ForceNFD", EncodeInvalidNFD)
	alias("Dot", EncodeDot)
}
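With the aliases registered, the new flags can be named in an encoding specification string just like the existing ones. A sketch (assuming `MultiEncoder`'s flag-style `Set` method, the same entry point exercised by `TestEncodeSet` further down, accepts a comma-separated list of flag names):

```go
package example

import "github.com/rclone/rclone/lib/encoder"

// parseEncoding shows the new aliases in use: "ForceNFC" now parses to
// EncodeInvalidNFC (assumption: Set takes a comma-separated list of
// flag names, as the existing aliases do).
func parseEncoding() (encoder.MultiEncoder, error) {
	var enc encoder.MultiEncoder
	err := enc.Set("Slash,ForceNFC")
	return enc, err
}
```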
@@ -226,6 +232,13 @@ func (mask MultiEncoder) Encode(in string) string {
		return ""
	}

	if mask.Has(EncodeInvalidNFD) {
		in = norm.NFD.String(in)
	}
	if mask.Has(EncodeInvalidNFC) {
		in = norm.NFC.String(in)
	}

	if mask.Has(EncodeDot) {
		switch in {
		case ".":
@@ -688,6 +701,15 @@ func (mask MultiEncoder) Decode(in string) string {
		return in
	}

	/* // Can't losslessly decode NFC/NFD
	if mask.Has(EncodeInvalidNFD) {
		in = norm.NFC.String(in)
	}
	if mask.Has(EncodeInvalidNFC) {
		in = norm.NFD.String(in)
	}
	*/

	if mask.Has(EncodeDot) {
		switch in {
		case ".":
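The asymmetry above (Encode normalizes, Decode deliberately does not) follows from Unicode normalization being lossy: NFC and NFD strings can render identically while differing in bytes, and once normalized there is no record of the original form. A standalone sketch using the same `golang.org/x/text/unicode/norm` package, which also explains why the visually identical `"Über"` literals in the test further down differ:

```go
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	nfc := "\u00dcber"  // "Über" as one precomposed rune (NFC form)
	nfd := "U\u0308ber" // "Über" as 'U' plus combining diaeresis (NFD form)

	fmt.Println(nfc == nfd)                  // false: same rendering, different bytes
	fmt.Println(norm.NFC.String(nfd) == nfc) // true: NFD -> NFC
	fmt.Println(norm.NFD.String(nfc) == nfd) // true: NFC -> NFD
	// Nothing records which form the input originally used, so a later
	// Decode cannot restore it - hence the commented-out block above.
}
```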
@@ -34,7 +34,6 @@ func TestEncodeString(t *testing.T) {
		got := test.mask.String()
		assert.Equal(t, test.want, got)
	}

}

func TestEncodeSet(t *testing.T) {
@@ -60,7 +59,6 @@ func TestEncodeSet(t *testing.T) {
		assert.Equal(t, test.wantErr, err != nil, err)
		assert.Equal(t, test.want, got, test.in)
	}

}

type testCase struct {
@@ -175,6 +173,34 @@ func TestEncodeInvalidUnicode(t *testing.T) {
	}
}

func TestEncodeNFCNFD(t *testing.T) {
	for i, tc := range []testCase{
		{
			mask: EncodeInvalidNFC,
			in:   "Über",
			out:  "Über",
		},
		{
			mask: EncodeInvalidNFD,
			in:   "Über",
			out:  "Über",
		},
	} {
		e := tc.mask
		t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
			got := e.Encode(tc.in)
			if got != tc.out {
				t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
			}
			// we can't losslessly decode NFC/NFD
			/* got2 := e.Decode(got)
			if got2 != tc.in {
				t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
			} */
		})
	}
}

func TestEncodeDot(t *testing.T) {
	for i, tc := range []testCase{
		{
@@ -67,6 +67,8 @@ var maskBits = []struct {
	{encoder.EncodeRightPeriod, "EncodeRightPeriod"},
	{encoder.EncodeRightCrLfHtVt, "EncodeRightCrLfHtVt"},
	{encoder.EncodeInvalidUtf8, "EncodeInvalidUtf8"},
	{encoder.EncodeInvalidNFC, "ForceNFC"},
	{encoder.EncodeInvalidNFD, "ForceNFD"},
	{encoder.EncodeDot, "EncodeDot"},
}
@@ -82,13 +84,15 @@ var allEdges = []edge{
	{encoder.EncodeLeftSpace, "EncodeLeftSpace", edgeLeft, []rune{' '}, []rune{'␠'}},
	{encoder.EncodeLeftPeriod, "EncodeLeftPeriod", edgeLeft, []rune{'.'}, []rune{'．'}},
	{encoder.EncodeLeftTilde, "EncodeLeftTilde", edgeLeft, []rune{'~'}, []rune{'～'}},
	{encoder.EncodeLeftCrLfHtVt, "EncodeLeftCrLfHtVt", edgeLeft,
	{
		encoder.EncodeLeftCrLfHtVt, "EncodeLeftCrLfHtVt", edgeLeft,
		[]rune{'\t', '\n', '\v', '\r'},
		[]rune{'␀' + '\t', '␀' + '\n', '␀' + '\v', '␀' + '\r'},
	},
	{encoder.EncodeRightSpace, "EncodeRightSpace", edgeRight, []rune{' '}, []rune{'␠'}},
	{encoder.EncodeRightPeriod, "EncodeRightPeriod", edgeRight, []rune{'.'}, []rune{'．'}},
	{encoder.EncodeRightCrLfHtVt, "EncodeRightCrLfHtVt", edgeRight,
	{
		encoder.EncodeRightCrLfHtVt, "EncodeRightCrLfHtVt", edgeRight,
		[]rune{'\t', '\n', '\v', '\r'},
		[]rune{'␀' + '\t', '␀' + '\n', '␀' + '\v', '␀' + '\r'},
	},
@@ -99,102 +103,122 @@ var allMappings = []mapping{{
		0,
	}, []rune{
		'␀',
	}}, {
	},
}, {
	encoder.EncodeSlash, []rune{
		'/',
	}, []rune{
		'／',
	}}, {
	},
}, {
	encoder.EncodeLtGt, []rune{
		'<', '>',
	}, []rune{
		'＜', '＞',
	}}, {
	},
}, {
	encoder.EncodeSquareBracket, []rune{
		'[', ']',
	}, []rune{
		'［', '］',
	}}, {
	},
}, {
	encoder.EncodeSemicolon, []rune{
		';',
	}, []rune{
		'；',
	}}, {
	},
}, {
	encoder.EncodeExclamation, []rune{
		'!',
	}, []rune{
		'！',
	}}, {
	},
}, {
	encoder.EncodeDoubleQuote, []rune{
		'"',
	}, []rune{
		'＂',
	}}, {
	},
}, {
	encoder.EncodeSingleQuote, []rune{
		'\'',
	}, []rune{
		'＇',
	}}, {
	},
}, {
	encoder.EncodeBackQuote, []rune{
		'`',
	}, []rune{
		'｀',
	}}, {
	},
}, {
	encoder.EncodeDollar, []rune{
		'$',
	}, []rune{
		'＄',
	}}, {
	},
}, {
	encoder.EncodeColon, []rune{
		':',
	}, []rune{
		'：',
	}}, {
	},
}, {
	encoder.EncodeQuestion, []rune{
		'?',
	}, []rune{
		'？',
	}}, {
	},
}, {
	encoder.EncodeAsterisk, []rune{
		'*',
	}, []rune{
		'＊',
	}}, {
	},
}, {
	encoder.EncodePipe, []rune{
		'|',
	}, []rune{
		'｜',
	}}, {
	},
}, {
	encoder.EncodeHash, []rune{
		'#',
	}, []rune{
		'＃',
	}}, {
	},
}, {
	encoder.EncodePercent, []rune{
		'%',
	}, []rune{
		'％',
	}}, {
	},
}, {
	encoder.EncodeSlash, []rune{
		'/',
	}, []rune{
		'／',
	}}, {
	},
}, {
	encoder.EncodeBackSlash, []rune{
		'\\',
	}, []rune{
		'＼',
	}}, {
	},
}, {
	encoder.EncodeCrLf, []rune{
		rune(0x0D), rune(0x0A),
	}, []rune{
		'␍', '␊',
	}}, {
	},
}, {
	encoder.EncodeDel, []rune{
		0x7F,
	}, []rune{
		'␡',
	}}, {
	},
}, {
	encoder.EncodeCtl,
	runeRange(0x01, 0x1F),
	runeRange('␁', '␟'),
@@ -438,6 +462,7 @@ func fatal(err error, s ...any) {
		fs.Fatal(nil, fmt.Sprint(append(s, err)))
	}
}

func fatalW(_ int, err error) func(...any) {
	if err != nil {
		return func(s ...any) {
@@ -471,12 +496,14 @@ func getMapping(mask encoder.MultiEncoder) mapping {
	}
	return mapping{}
}

func collectEncodables(m []mapping) (out []rune) {
	for _, s := range m {
		out = append(out, s.src...)
	}
	return
}

func collectEncoded(m []mapping) (out []rune) {
	for _, s := range m {
		out = append(out, s.dst...)