mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)
docs: fix markdownlint issue md046/code-block-style in backend command docs
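Markdownlint's MD046 (code-block-style) rule fires when a document mixes indented and fenced code blocks. The backend command docs are generated from the Long strings of fs.CommandHelp entries, and the old help text used 4-space-indented examples; this commit rewrites them as fenced blocks. Because those Long strings are Go raw string literals delimited by backticks, a markdown fence of three backticks cannot appear inside one directly, which is why every fence in the diff below is spliced in by string concatenation. A minimal sketch of the pattern (an illustration, not rclone's actual code):

package main

import "fmt"

// long mimics the help-string pattern used throughout this commit: the raw
// string is closed, the fence line is added as an interpreted string literal
// (where backticks are legal), and the raw string is then reopened.
const long = `To show the current lifecycle rules:

` + "```console" + `
rclone backend lifecycle b2:bucket
` + "```" + `
`

func main() {
	// Prints the help text exactly as it lands in the generated markdown,
	// with a fenced (MD046-clean) console block instead of an indented one.
	fmt.Print(long)
}
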
@@ -2364,25 +2364,31 @@ Usage Examples:

To show the current lifecycle rules:

-    rclone backend lifecycle b2:bucket
+` + "```console" + `
+rclone backend lifecycle b2:bucket
+` + "```" + `

This will dump something like this showing the lifecycle rules.

-    [
-        {
-            "daysFromHidingToDeleting": 1,
-            "daysFromUploadingToHiding": null,
-            "daysFromStartingToCancelingUnfinishedLargeFiles": null,
-            "fileNamePrefix": ""
-        }
-    ]
+` + "```json" + `
+[
+    {
+        "daysFromHidingToDeleting": 1,
+        "daysFromUploadingToHiding": null,
+        "daysFromStartingToCancelingUnfinishedLargeFiles": null,
+        "fileNamePrefix": ""
+    }
+]
+` + "```" + `

If there are no lifecycle rules (the default) then it will just return [].

To reset the current lifecycle rules:

-    rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
-    rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
+` + "```console" + `
+rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
+rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
+` + "```" + `

This will run and then print the new lifecycle rules as above.

@@ -2394,7 +2400,9 @@ the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
the config also which will mean deletions won't cause versions but
overwrites will still cause versions to be made.

-    rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
+` + "```console" + `
+rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
+` + "```" + `

See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,

@@ -2484,8 +2492,10 @@ max-age, which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.

-    rclone backend cleanup b2:bucket/path/to/object
-    rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
+` + "```console" + `
+rclone backend cleanup b2:bucket/path/to/object
+rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
+` + "```" + `

Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,

@@ -2513,7 +2523,9 @@ var cleanupHiddenHelp = fs.CommandHelp{
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.

-    rclone backend cleanup-hidden b2:bucket/path/to/dir
+` + "```console" + `
+rclone backend cleanup-hidden b2:bucket/path/to/dir
+` + "```" + `
`,
}

@@ -929,8 +929,10 @@ strings of the encoded results.

Usage Example:

-    rclone backend encode crypt: file1 [file2...]
-    rclone rc backend/command command=encode fs=crypt: file1 [file2...]
+` + "```console" + `
+rclone backend encode crypt: file1 [file2...]
+rclone rc backend/command command=encode fs=crypt: file1 [file2...]
+` + "```" + `
`,
},
{

@@ -942,8 +944,10 @@ inputs are invalid.

Usage Example:

-    rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
-    rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
+` + "```console" + `
+rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
+rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
+` + "```" + `
`,
},
}

@@ -563,7 +563,11 @@ var commandHelp = []fs.CommandHelp{{
	Short: "Show metadata about the DOI.",
	Long: `This command returns a JSON object with some information about the DOI.

-    rclone backend medatadata doi:
+Usage example:
+
+` + "```console" + `
+rclone backend metadata doi:
+` + "```" + `

It returns a JSON object representing metadata about the DOI.
`,

@@ -573,11 +577,13 @@ It returns a JSON object representing metadata about the DOI.
	Long: `This set command can be used to update the config parameters
for a running doi backend.

-Usage Examples:
+Usage examples:

-    rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
+` + "```console" + `
+rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
+` + "```" + `

The option keys are named as they are in the config file.

@@ -3669,8 +3669,10 @@ var commandHelp = []fs.CommandHelp{{

Usage Examples:

-    rclone backend get drive: [-o service_account_file] [-o chunk_size]
-    rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
+` + "```console" + `
+rclone backend get drive: [-o service_account_file] [-o chunk_size]
+rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
+` + "```" + `
`,
	Opts: map[string]string{
		"chunk_size": "show the current upload chunk size",

@@ -3683,8 +3685,10 @@ Usage Examples:

Usage Examples:

-    rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
-    rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
+` + "```console" + `
+rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
+rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
+` + "```" + `
`,
	Opts: map[string]string{
		"chunk_size": "update the current upload chunk size",

@@ -3697,8 +3701,10 @@ Usage Examples:

Usage:

-    rclone backend shortcut drive: source_item destination_shortcut
-    rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
+` + "```console" + `
+rclone backend shortcut drive: source_item destination_shortcut
+rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
+` + "```" + `

In the first example this creates a shortcut from the "source_item"
which can be a file or a directory to the "destination_shortcut". The

@@ -3721,38 +3727,44 @@ account.

Usage:

-    rclone backend [-o config] drives drive:
+` + "```console" + `
+rclone backend [-o config] drives drive:
+` + "```" + `

-This will return a JSON list of objects like this
+This will return a JSON list of objects like this:

-    [
-        {
-            "id": "0ABCDEF-01234567890",
-            "kind": "drive#teamDrive",
-            "name": "My Drive"
-        },
-        {
-            "id": "0ABCDEFabcdefghijkl",
-            "kind": "drive#teamDrive",
-            "name": "Test Drive"
-        }
-    ]
+` + "```json" + `
+[
+    {
+        "id": "0ABCDEF-01234567890",
+        "kind": "drive#teamDrive",
+        "name": "My Drive"
+    },
+    {
+        "id": "0ABCDEFabcdefghijkl",
+        "kind": "drive#teamDrive",
+        "name": "Test Drive"
+    }
+]
+` + "```" + `

With the -o config parameter it will output the list in a format
suitable for adding to a config file to make aliases for all the
drives found and a combined drive.

-    [My Drive]
-    type = alias
-    remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
+` + "```ini" + `
+[My Drive]
+type = alias
+remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:

-    [Test Drive]
-    type = alias
-    remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
+[Test Drive]
+type = alias
+remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:

-    [AllDrives]
-    type = combine
-    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
+[AllDrives]
+type = combine
+upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
+` + "```" + `

Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal characters will be

@@ -3768,20 +3780,24 @@ passed in recursively.

Usage:

+` + "```console" + `
+rclone backend untrash drive:directory
+rclone backend --interactive untrash drive:directory subdir
+` + "```" + `
+
This takes an optional directory to trash which make this easier to
use via the API.

-    rclone backend untrash drive:directory
-    rclone backend --interactive untrash drive:directory subdir
-
Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.

Result:

-    {
-        "Untrashed": 17,
-        "Errors": 0
-    }
+` + "```json" + `
+{
+    "Untrashed": 17,
+    "Errors": 0
+}
+` + "```" + `
`,
}, {
	Name: "copyid",

@@ -3790,8 +3806,10 @@ Result:

Usage:

-    rclone backend copyid drive: ID path
-    rclone backend copyid drive: ID1 path1 ID2 path2
+` + "```console" + `
+rclone backend copyid drive: ID path
+rclone backend copyid drive: ID1 path1 ID2 path2
+` + "```" + `

It copies the drive file with ID given to the path (an rclone path which
will be passed internally to rclone copyto). The ID and path pairs can be

@@ -3813,8 +3831,10 @@ Use the --interactive/-i or --dry-run flag to see what would be copied before co

Usage:

-    rclone backend moveid drive: ID path
-    rclone backend moveid drive: ID1 path1 ID2 path2
+` + "```console" + `
+rclone backend moveid drive: ID path
+rclone backend moveid drive: ID1 path1 ID2 path2
+` + "```" + `

It moves the drive file with ID given to the path (an rclone path which
will be passed internally to rclone moveto).

@@ -3841,40 +3861,49 @@ Use the --interactive/-i or --dry-run flag to see what would be moved beforehand

Usage:

-    rclone backend query drive: query
-
+` + "```console" + `
+rclone backend query drive: query
+` + "```" + `

The query syntax is documented at [Google Drive Search query terms and
operators](https://developers.google.com/drive/api/guides/ref-search-terms).

For example:

-    rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
+` + "```console" + `
+rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
+` + "```" + `

If the query contains literal ' or \ characters, these need to be escaped with
\ characters. "'" becomes "\'" and "\" becomes "\\\", for example to match a
file named "foo ' \.txt":

-    rclone backend query drive: "name = 'foo \' \\\.txt'"
+` + "```console" + `
+rclone backend query drive: "name = 'foo \' \\\.txt'"
+` + "```" + `

The result is a JSON array of matches, for example:

-    [
-        {
-            "createdTime": "2017-06-29T19:58:28.537Z",
-            "id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
-            "md5Checksum": "68518d16be0c6fbfab918be61d658032",
-            "mimeType": "text/plain",
-            "modifiedTime": "2024-02-02T10:40:02.874Z",
-            "name": "foo ' \\.txt",
-            "parents": [
-                "0BxAe_BCDE4zkFGZpcWJGek0xbzC"
-            ],
-            "resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
-            "sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
-            "size": "311",
-            "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
-        }
-    ]`,
+` + "```json" + `
+[
+    {
+        "createdTime": "2017-06-29T19:58:28.537Z",
+        "id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
+        "md5Checksum": "68518d16be0c6fbfab918be61d658032",
+        "mimeType": "text/plain",
+        "modifiedTime": "2024-02-02T10:40:02.874Z",
+        "name": "foo ' \\.txt",
+        "parents": [
+            "0BxAe_BCDE4zkFGZpcWJGek0xbzC"
+        ],
+        "resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
+        "sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
+        "size": "311",
+        "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
+    }
+]
+` + "```console" + `
+`,
}, {
	Name:  "rescue",
	Short: "Rescue or delete any orphaned files.",

@@ -3892,19 +3921,27 @@ This can be used in 3 ways.

First, list all orphaned files

-    rclone backend rescue drive:
+` + "```console" + `
+rclone backend rescue drive:
+` + "```" + `

Second rescue all orphaned files to the directory indicated

-    rclone backend rescue drive: "relative/path/to/rescue/directory"
+` + "```console" + `
+rclone backend rescue drive: "relative/path/to/rescue/directory"
+` + "```" + `

e.g. To rescue all orphans to a directory called "Orphans" in the top level

-    rclone backend rescue drive: Orphans
+` + "```console" + `
+rclone backend rescue drive: Orphans
+` + "```" + `

Third delete all orphaned files to the trash

-    rclone backend rescue drive: -o delete
+` + "```console" + `
+rclone backend rescue drive: -o delete
+` + "```" + `
`,
}}

@@ -46,7 +46,10 @@ var commandHelp = []fs.CommandHelp{{
	Short: "Drop cache.",
	Long: `Completely drop checksum cache.
Usage Example:
-    rclone backend drop hasher:
+
+` + "```console" + `
+rclone backend drop hasher:
+` + "```" + `
`,
}, {
	Name: "dump",

@@ -60,15 +63,23 @@ Usage Example:
	Name: "import",
	Short: "Import a SUM file.",
	Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
-Usage Example:
-    rclone backend import hasher:subdir md5 /path/to/sum.md5
+
+Usage example:
+
+` + "```console" + `
+rclone backend import hasher:subdir md5 /path/to/sum.md5
+` + "```" + `
`,
}, {
	Name: "stickyimport",
	Short: "Perform fast import of a SUM file.",
	Long: `Fill hash cache from a SUM file without verifying file fingerprints.
-Usage Example:
-    rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
+
+Usage example:
+
+` + "```console" + `
+rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
+` + "```" + `
`,
}}

@@ -724,9 +724,11 @@ for a running http backend.

Usage Examples:

-    rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=remote: -o url=https://example.com
+` + "```console" + `
+rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=remote: -o url=https://example.com
+` + "```" + `

The option keys are named as they are in the config file.

@@ -3,18 +3,18 @@
[every line of this hunk appears twice with identical text once leading whitespace is stripped, i.e. a whitespace-only re-indentation, so the import block is shown once]

package oracleobjectstorage

import (
	"context"
	"fmt"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/objectstorage"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

@@ -22,28 +22,34 @@ import (
// ------------------------------------------------------------

[the const block and struct fields in this hunk appear twice with identical text, i.e. a re-indentation only, and are shown once; the substantive help-string changes are marked with -/+]

const (
	operationRename        = "rename"
	operationListMultiPart = "list-multipart-uploads"
	operationCleanup       = "cleanup"
	operationRestore       = "restore"
)

var commandHelp = []fs.CommandHelp{{
	Name:  operationRename,
	Short: "change the name of an object.",
	Long: `This command can be used to rename a object.

Usage Examples:

-    rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
+` + "```console" + `
+rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
+` + "```" + `
`,
	Opts: nil,
}, {
	Name:  operationListMultiPart,
	Short: "List the unfinished multipart uploads.",
	Long: `This command lists the unfinished multipart uploads in JSON format.

-    rclone backend list-multipart-uploads oos:bucket/path/to/object
+Usage Examples:
+
+` + "```console" + `
+rclone backend list-multipart-uploads oos:bucket/path/to/object
+` + "```" + `

It returns a dictionary of buckets with values as lists of unfinished
multipart uploads.

@@ -51,71 +57,86 @@ multipart uploads.
You can call it with no bucket in which case it lists all bucket, with
a bucket or with a bucket and path.

-    {
-        "test-bucket": [
-            {
-                "namespace": "test-namespace",
-                "bucket": "test-bucket",
-                "object": "600m.bin",
-                "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
-                "timeCreated": "2022-07-29T06:21:16.595Z",
-                "storageTier": "Standard"
-            }
-        ]
+` + "```json" + `
+{
+    "test-bucket": [
+        {
+            "namespace": "test-namespace",
+            "bucket": "test-bucket",
+            "object": "600m.bin",
+            "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
+            "timeCreated": "2022-07-29T06:21:16.595Z",
+            "storageTier": "Standard"
+        }
+    ]
}
`,
}, {
[the struct fields below appear twice with identical text, i.e. a re-indentation only, and are shown once]
	Name:  operationCleanup,
	Short: "Remove unfinished multipart uploads.",
	Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.

Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.

-    rclone backend cleanup oos:bucket/path/to/object
-    rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
+Usage Examples:
+
+` + "```console" + `
+rclone backend cleanup oos:bucket/path/to/object
+rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
+` + "```" + `

Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
	Opts: map[string]string{
		"max-age": "Max age of upload to delete",
	},
}, {
	Name:  operationRestore,
	Short: "Restore objects from Archive to Standard storage.",
	Long: `This command can be used to restore one or more objects from Archive to Standard storage.

Usage Examples:

-    rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
-    rclone backend restore oos:bucket -o hours=HOURS
+` + "```console" + `
+rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
+rclone backend restore oos:bucket -o hours=HOURS
+` + "```" + `

This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags

-    rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
+` + "```console" + `
+rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
+` + "```" + `

All the objects shown will be marked for restore, then

-    rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
+` + "```console" + `
+rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
+` + "```" + `

It returns a list of status dictionaries with Object Name and Status
keys. The Status will be "RESTORED"" if it was successful or an error message
if not.

-    [
-        {
-            "Object": "test.txt"
-            "Status": "RESTORED",
-        },
-        {
-            "Object": "test/file4.txt"
-            "Status": "RESTORED",
-        }
-    ]
+` + "```json" + `
+[
+    {
+        "Object": "test.txt"
+        "Status": "RESTORED",
+    },
+    {
+        "Object": "test/file4.txt"
+        "Status": "RESTORED",
+    }
+]
+` + "```" + `
`,
	Opts: map[string]string{
		"hours": "The number of hours for which this object will be restored. Default is 24 hrs.",
	},
},
}

@@ -131,104 +152,104 @@ If it is a string or a []string it will be shown to the user
otherwise it will be JSON encoded and shown to the user like that
*/
[each line of this hunk appears twice with identical text once indentation is stripped, i.e. a whitespace-only re-indentation; the functions are shown once]

func (f *Fs) Command(ctx context.Context, commandName string, args []string,
	opt map[string]string) (result any, err error) {
	// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
	switch commandName {
	case operationRename:
		if len(args) < 2 {
			return nil, fmt.Errorf("path to object or its new name to rename is empty")
		}
		remote := args[0]
		newName := args[1]
		return f.rename(ctx, remote, newName)
	case operationListMultiPart:
		return f.listMultipartUploadsAll(ctx)
	case operationCleanup:
		maxAge := 24 * time.Hour
		if opt["max-age"] != "" {
			maxAge, err = fs.ParseDuration(opt["max-age"])
			if err != nil {
				return nil, fmt.Errorf("bad max-age: %w", err)
			}
		}
		return nil, f.cleanUp(ctx, maxAge)
	case operationRestore:
		return f.restore(ctx, opt)
	default:
		return nil, fs.ErrorCommandNotFound
	}
}

func (f *Fs) rename(ctx context.Context, remote, newName string) (any, error) {
	if remote == "" {
		return nil, fmt.Errorf("path to object file cannot be empty")
	}
	if newName == "" {
		return nil, fmt.Errorf("the object's new name cannot be empty")
	}
	o := &Object{
		fs:     f,
		remote: remote,
	}
	bucketName, objectPath := o.split()
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Errorf(f, "failed to read object:%v %v ", objectPath, err)
		if strings.HasPrefix(objectPath, bucketName) {
			fs.Errorf(f, "warn: ensure object path: %v is relative to bucket:%v and doesn't include the bucket name",
				objectPath, bucketName)
		}
		return nil, fs.ErrorNotAFile
	}
	details := objectstorage.RenameObjectDetails{
		SourceName: common.String(objectPath),
		NewName:    common.String(newName),
	}
	request := objectstorage.RenameObjectRequest{
		NamespaceName:       common.String(f.opt.Namespace),
		BucketName:          common.String(bucketName),
		RenameObjectDetails: details,
		OpcClientRequestId:  nil,
		RequestMetadata:     common.RequestMetadata{},
	}
	var response objectstorage.RenameObjectResponse
	err = f.pacer.Call(func() (bool, error) {
		response, err = f.srv.RenameObject(ctx, request)
		return shouldRetry(ctx, response.HTTPResponse(), err)
	})
	if err != nil {
		return nil, err
	}
	fs.Infof(f, "success: renamed object-path: %v to %v", objectPath, newName)
	return "renamed successfully", nil
}

func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*objectstorage.MultipartUpload,
	err error) {
	uploadsMap = make(map[string][]*objectstorage.MultipartUpload)
	bucket, directory := f.split("")
	if bucket != "" {
		uploads, err := f.listMultipartUploads(ctx, bucket, directory)
		if err != nil {
			return uploadsMap, err
		}
		uploadsMap[bucket] = uploads
		return uploadsMap, nil
	}
	entries, err := f.listBuckets(ctx)
	if err != nil {
		return uploadsMap, err
	}
	for _, entry := range entries {
		bucket := entry.Remote()
		uploads, listErr := f.listMultipartUploads(ctx, bucket, "")
		if listErr != nil {
			err = listErr
			fs.Errorf(f, "%v", err)
		}
		uploadsMap[bucket] = uploads
	}
	return uploadsMap, err
}

@@ -237,8 +258,8 @@ func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string
[whitespace-only re-indentation; shown once]
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
	uploads []*objectstorage.MultipartUpload, err error) {
	return f.listMultipartUploadsObject(ctx, bucketName, directory, false)
}

// listMultipartUploads finds first outstanding multipart uploads for (bucket, key)

@@ -247,147 +268,147 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory str
[each line of this hunk appears twice with identical text once indentation is stripped, i.e. a whitespace-only re-indentation; the functions are shown once]
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) findLatestMultipartUpload(ctx context.Context, bucketName, directory string) (
	uploads []*objectstorage.MultipartUpload, err error) {
	pastUploads, err := f.listMultipartUploadsObject(ctx, bucketName, directory, true)
	if err != nil {
		return nil, err
	}

	if len(pastUploads) > 0 {
		sort.Slice(pastUploads, func(i, j int) bool {
			return pastUploads[i].TimeCreated.After(pastUploads[j].TimeCreated.Time)
		})
		return pastUploads[:1], nil
	}
	return nil, err
}

func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directory string, exact bool) (
	uploads []*objectstorage.MultipartUpload, err error) {

	uploads = []*objectstorage.MultipartUpload{}
	req := objectstorage.ListMultipartUploadsRequest{
		NamespaceName: common.String(f.opt.Namespace),
		BucketName:    common.String(bucketName),
	}

	var response objectstorage.ListMultipartUploadsResponse
	for {
		err = f.pacer.Call(func() (bool, error) {
			response, err = f.srv.ListMultipartUploads(ctx, req)
			return shouldRetry(ctx, response.HTTPResponse(), err)
		})
		if err != nil {
			// fs.Debugf(f, "failed to list multi part uploads %v", err)
			return uploads, err
		}
		for index, item := range response.Items {
			if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
				continue
			}
			if exact {
				if *item.Object == directory {
					uploads = append(uploads, &response.Items[index])
				}
			} else {
				uploads = append(uploads, &response.Items[index])
			}
		}
		if response.OpcNextPage == nil {
			break
		}
		req.Page = response.OpcNextPage
	}
	return uploads, nil
}

func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPath string, uploadID string) (
	uploadedParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
	uploadedParts = make(map[int]objectstorage.MultipartUploadPartSummary)
	req := objectstorage.ListMultipartUploadPartsRequest{
		NamespaceName: common.String(f.opt.Namespace),
		BucketName:    common.String(bucketName),
		ObjectName:    common.String(bucketPath),
		UploadId:      common.String(uploadID),
		Limit:         common.Int(1000),
	}

	var response objectstorage.ListMultipartUploadPartsResponse
	for {
		err = f.pacer.Call(func() (bool, error) {
			response, err = f.srv.ListMultipartUploadParts(ctx, req)
			return shouldRetry(ctx, response.HTTPResponse(), err)
		})
		if err != nil {
			return uploadedParts, err
		}
		for _, item := range response.Items {
			uploadedParts[*item.PartNumber] = item
		}
		if response.OpcNextPage == nil {
			break
		}
		req.Page = response.OpcNextPage
	}
	return uploadedParts, nil
}

func (f *Fs) restore(ctx context.Context, opt map[string]string) (any, error) {
	req := objectstorage.RestoreObjectsRequest{
		NamespaceName:         common.String(f.opt.Namespace),
		RestoreObjectsDetails: objectstorage.RestoreObjectsDetails{},
	}
	if hours := opt["hours"]; hours != "" {
		ihours, err := strconv.Atoi(hours)
		if err != nil {
			return nil, fmt.Errorf("bad value for hours: %w", err)
		}
		req.RestoreObjectsDetails.Hours = &ihours
	}
	type status struct {
		Object string
		Status string
	}
	var (
		outMu sync.Mutex
		out   = []status{}
		err   error
	)
	err = operations.ListFn(ctx, f, func(obj fs.Object) {
		// Remember this is run --checkers times concurrently
		o, ok := obj.(*Object)
		st := status{Object: obj.Remote(), Status: "RESTORED"}
		defer func() {
			outMu.Lock()
			out = append(out, st)
			outMu.Unlock()
		}()
		if !ok {
			st.Status = "Not an OCI Object Storage object"
			return
		}
		if o.storageTier == nil || (*o.storageTier != "archive") {
			st.Status = "Object not in Archive storage tier"
			return
		}
		if operations.SkipDestructive(ctx, obj, "restore") {
			return
		}
		bucket, bucketPath := o.split()
		reqCopy := req
		reqCopy.BucketName = &bucket
		reqCopy.ObjectName = &bucketPath
		var response objectstorage.RestoreObjectsResponse
		err = f.pacer.Call(func() (bool, error) {
			response, err = f.srv.RestoreObjects(ctx, reqCopy)
			return shouldRetry(ctx, response.HTTPResponse(), err)
		})
		if err != nil {
			st.Status = err.Error()
		}
	})
	if err != nil {
		return out, err
	}
	return out, nil
}

@@ -1683,7 +1683,9 @@ var commandHelp = []fs.CommandHelp{{

Usage:

-    rclone backend addurl pikpak:dirpath url
+` + "```console" + `
+rclone backend addurl pikpak:dirpath url
+` + "```" + `

Downloads will be stored in 'dirpath'. If 'dirpath' is invalid,
download will fallback to default 'My Pack' folder.

@@ -1695,8 +1697,10 @@ download will fallback to default 'My Pack' folder.

Usage:

-    rclone backend decompress pikpak:dirpath {filename} -o password=password
-    rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
+` + "```console" + `
+rclone backend decompress pikpak:dirpath {filename} -o password=password
+rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
+` + "```" + `

An optional argument 'filename' can be specified for a file located in
'pikpak:dirpath'. You may want to pass '-o password=password' for a

@@ -1705,11 +1709,13 @@ source files after decompression finished.

Result:

-    {
-        "Decompressed": 17,
-        "SourceDeleted": 0,
-        "Errors": 0
-    }
+` + "```json" + `
+{
+    "Decompressed": 17,
+    "SourceDeleted": 0,
+    "Errors": 0
+}
+` + "```" + `
`,
}}

backend/s3/s3.go (182 lines changed)
@@ -2908,34 +2908,41 @@ or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Fre

Usage Examples:

-    rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
-    rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
-    rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
-    rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
+` + "```console" + `
+rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
+rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
+rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
+rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
+` + "```" + `

This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags

-    rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
+` + "```console" + `
+rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
+` + "```" + `

All the objects shown will be marked for restore, then

-    rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
+` + "```console" + `
+rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
+` + "```" + `

It returns a list of status dictionaries with Remote and Status
keys. The Status will be OK if it was successful or an error message
if not.

-    [
-        {
-            "Status": "OK",
-            "Remote": "test.txt"
-        },
-        {
-            "Status": "OK",
-            "Remote": "test/file4.txt"
-        }
-    ]
+` + "```json" + `
+[
+    {
+        "Status": "OK",
+        "Remote": "test.txt"
+    },
+    {
+        "Status": "OK",
+        "Remote": "test/file4.txt"
+    }
+]
+` + "```" + `
`,
	Opts: map[string]string{
		"priority": "Priority of restore: Standard|Expedited|Bulk",

@@ -2950,43 +2957,47 @@ or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Fre

Usage Examples:

-    rclone backend restore-status s3:bucket/path/to/object
-    rclone backend restore-status s3:bucket/path/to/directory
-    rclone backend restore-status -o all s3:bucket/path/to/directory
+` + "```console" + `
+rclone backend restore-status s3:bucket/path/to/object
+rclone backend restore-status s3:bucket/path/to/directory
+rclone backend restore-status -o all s3:bucket/path/to/directory
+` + "```" + `

This command does not obey the filters.

It returns a list of status dictionaries.

-    [
-        {
-            "Remote": "file.txt",
-            "VersionID": null,
-            "RestoreStatus": {
-                "IsRestoreInProgress": true,
-                "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
-            },
-            "StorageClass": "GLACIER"
-        },
-        {
-            "Remote": "test.pdf",
-            "VersionID": null,
-            "RestoreStatus": {
-                "IsRestoreInProgress": false,
-                "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
-            },
-            "StorageClass": "DEEP_ARCHIVE"
-        },
-        {
-            "Remote": "test.gz",
-            "VersionID": null,
-            "RestoreStatus": {
-                "IsRestoreInProgress": true,
-                "RestoreExpiryDate": "null"
-            },
-            "StorageClass": "INTELLIGENT_TIERING"
-        }
-    ]
+` + "```json" + `
+[
+    {
+        "Remote": "file.txt",
+        "VersionID": null,
+        "RestoreStatus": {
+            "IsRestoreInProgress": true,
+            "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
+        },
+        "StorageClass": "GLACIER"
+    },
+    {
+        "Remote": "test.pdf",
+        "VersionID": null,
+        "RestoreStatus": {
+            "IsRestoreInProgress": false,
+            "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
+        },
+        "StorageClass": "DEEP_ARCHIVE"
+    },
+    {
+        "Remote": "test.gz",
+        "VersionID": null,
+        "RestoreStatus": {
+            "IsRestoreInProgress": true,
+            "RestoreExpiryDate": "null"
+        },
+        "StorageClass": "INTELLIGENT_TIERING"
+    }
+]
+` + "```" + `
`,
	Opts: map[string]string{
		"all": "if set then show all objects, not just ones with restore status",

@@ -2996,7 +3007,11 @@ It returns a list of status dictionaries.
	Short: "List the unfinished multipart uploads.",
	Long: `This command lists the unfinished multipart uploads in JSON format.

-    rclone backend list-multipart s3:bucket/path/to/object
+Usage Examples:
+
+` + "```console" + `
+rclone backend list-multipart s3:bucket/path/to/object
+` + "```" + `

It returns a dictionary of buckets with values as lists of unfinished
multipart uploads.

@@ -3004,27 +3019,28 @@ multipart uploads.
You can call it with no bucket in which case it lists all bucket, with
a bucket or with a bucket and path.

-    {
-      "rclone": [
-        {
-          "Initiated": "2020-06-26T14:20:36Z",
-          "Initiator": {
-            "DisplayName": "XXX",
-            "ID": "arn:aws:iam::XXX:user/XXX"
-          },
-          "Key": "KEY",
-          "Owner": {
-            "DisplayName": null,
-            "ID": "XXX"
-          },
-          "StorageClass": "STANDARD",
-          "UploadId": "XXX"
-        }
-      ],
-      "rclone-1000files": [],
-      "rclone-dst": []
-    }
+` + "```json" + `
+{
+  "rclone": [
+    {
+      "Initiated": "2020-06-26T14:20:36Z",
+      "Initiator": {
+        "DisplayName": "XXX",
+        "ID": "arn:aws:iam::XXX:user/XXX"
+      },
+      "Key": "KEY",
+      "Owner": {
+        "DisplayName": null,
+        "ID": "XXX"
+      },
+      "StorageClass": "STANDARD",
+      "UploadId": "XXX"
+    }
+  ],
+  "rclone-1000files": [],
+  "rclone-dst": []
+}
+` + "```" + `
`,
}, {
	Name: "cleanup",

@@ -3035,8 +3051,12 @@ max-age which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.

-    rclone backend cleanup s3:bucket/path/to/object
-    rclone backend cleanup -o max-age=7w s3:bucket/path/to/object
+Usage Examples:
+
+` + "```console" + `
+rclone backend cleanup s3:bucket/path/to/object
+rclone backend cleanup -o max-age=7w s3:bucket/path/to/object
+` + "```" + `

Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,

@@ -3052,7 +3072,11 @@ on a versions enabled bucket.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.

-    rclone backend cleanup-hidden s3:bucket/path/to/dir
+Usage Examples:
+
+` + "```console" + `
+rclone backend cleanup-hidden s3:bucket/path/to/dir
+` + "```" + `
`,
}, {
	Name: "versioning",

@@ -3061,9 +3085,13 @@ it would do.
passed and then returns the current versioning status for the bucket
supplied.

-    rclone backend versioning s3:bucket # read status only
-    rclone backend versioning s3:bucket Enabled
-    rclone backend versioning s3:bucket Suspended
+Usage Examples:
+
+` + "```console" + `
+rclone backend versioning s3:bucket # read status only
+rclone backend versioning s3:bucket Enabled
+rclone backend versioning s3:bucket Suspended
+` + "```" + `

It may return "Enabled", "Suspended" or "Unversioned". Note that once versioning
has been enabled the status can't be set back to "Unversioned".

@@ -3076,9 +3104,11 @@ for a running s3 backend.

Usage Examples:

-    rclone backend set s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=s3: -o session_token=X -o access_key_id=X -o secret_access_key=X
+` + "```console" + `
+rclone backend set s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=s3: -o session_token=X -o access_key_id=X -o secret_access_key=X
+` + "```" + `

The option keys are named as they are in the config file.

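For reference, this is how a hypothetical new backend command would be written in the style the commit establishes. The package name, the "frobnicate" command, and its option are invented for illustration; the fs.CommandHelp fields (Name, Short, Long, Opts) are the ones used throughout the hunks above.

package mybackend

import "github.com/rclone/rclone/fs"

// commandHelp follows the post-commit convention: usage examples in Long
// are wrapped in fenced code blocks built by string concatenation, so the
// generated markdown docs satisfy markdownlint rule MD046.
var commandHelp = []fs.CommandHelp{{
	Name:  "frobnicate", // hypothetical command name
	Short: "Frobnicate the remote.",
	Long: `This command frobnicates the remote.

Usage example:

` + "```console" + `
rclone backend frobnicate remote:path
` + "```" + `
`,
	Opts: map[string]string{
		"level": "how hard to frobnicate", // hypothetical option
	},
}}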