mirror of https://github.com/rclone/rclone.git
synced 2025-12-06 00:03:32 +00:00
docs: fix markdownlint issue md046/code-block-style in backend command docs
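Every hunk below shares one mechanical pattern, so it is worth spelling out once. The backend command help lives in Go raw string literals, which are delimited by backquotes and therefore cannot contain the backquote a markdown fence needs; the fenced blocks this commit introduces are spliced in by string concatenation instead. A minimal sketch of the idiom (the help text here is invented for illustration):

````go
package main

import "fmt"

// A raw string literal cannot contain a backquote, so the markdown
// fence is concatenated in as an interpreted string literal. This is
// why ` + "```console" + ` appears on its own line throughout the
// diff below.
var help = `Usage example:

` + "```console" + `
rclone backend lifecycle b2:bucket
` + "```" + `
`

func main() {
	fmt.Print(help)
}
````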
@@ -2364,25 +2364,31 @@ Usage Examples:
 
 To show the current lifecycle rules:
 
-    rclone backend lifecycle b2:bucket
+` + "```console" + `
+rclone backend lifecycle b2:bucket
+` + "```" + `
 
 This will dump something like this showing the lifecycle rules.
 
-    [
-        {
-            "daysFromHidingToDeleting": 1,
-            "daysFromUploadingToHiding": null,
-            "daysFromStartingToCancelingUnfinishedLargeFiles": null,
-            "fileNamePrefix": ""
-        }
-    ]
+` + "```json" + `
+[
+    {
+        "daysFromHidingToDeleting": 1,
+        "daysFromUploadingToHiding": null,
+        "daysFromStartingToCancelingUnfinishedLargeFiles": null,
+        "fileNamePrefix": ""
+    }
+]
+` + "```" + `
 
 If there are no lifecycle rules (the default) then it will just return [].
 
 To reset the current lifecycle rules:
 
-    rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
-    rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
+` + "```console" + `
+rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
+rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
+` + "```" + `
 
 This will run and then print the new lifecycle rules as above.
 
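The lifecycle output above maps onto a small Go type. The field names are taken from the JSON in the hunk; the struct itself is only an illustrative sketch, not rclone's own type. Pointer fields stand in for the nulls in the example output.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// LifecycleRule mirrors the JSON printed by `rclone backend lifecycle`
// in the hunk above. Illustrative only: rclone's internal type may differ.
type LifecycleRule struct {
	DaysFromHidingToDeleting                        *int   `json:"daysFromHidingToDeleting"`
	DaysFromUploadingToHiding                       *int   `json:"daysFromUploadingToHiding"`
	DaysFromStartingToCancelingUnfinishedLargeFiles *int   `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
	FileNamePrefix                                  string `json:"fileNamePrefix"`
}

func main() {
	data := []byte(`[{"daysFromHidingToDeleting": 1, "daysFromUploadingToHiding": null, "fileNamePrefix": ""}]`)
	var rules []LifecycleRule
	if err := json.Unmarshal(data, &rules); err != nil {
		panic(err)
	}
	fmt.Println(*rules[0].DaysFromHidingToDeleting) // 1; null/absent fields stay nil
}
```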
@@ -2394,7 +2400,9 @@ the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
 the config also which will mean deletions won't cause versions but
 overwrites will still cause versions to be made.
 
-    rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
+` + "```console" + `
+rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
+` + "```" + `
 
 See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
 `,
@@ -2484,8 +2492,10 @@ max-age, which defaults to 24 hours.
 Note that you can use --interactive/-i or --dry-run with this command to see what
 it would do.
 
-    rclone backend cleanup b2:bucket/path/to/object
-    rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
+` + "```console" + `
+rclone backend cleanup b2:bucket/path/to/object
+rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
+` + "```" + `
 
 Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
 `,
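The 2h/7d/7w suffixes mentioned above are handled by rclone's own duration parser rather than the standard library's; the oracleobjectstorage hunk later in this diff consumes the option exactly this way. A minimal sketch, with only the package name invented:

```go
package b2sketch // hypothetical package name, for illustration only

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/fs"
)

// maxAgeFromOpts shows how a backend command turns -o max-age=7w into a
// time.Duration, modelled on the oracleobjectstorage handler in this diff.
func maxAgeFromOpts(opt map[string]string) (time.Duration, error) {
	maxAge := 24 * time.Hour // the documented default
	if v := opt["max-age"]; v != "" {
		parsed, err := fs.ParseDuration(v) // accepts 2h, 7d, 7w etc.
		if err != nil {
			return 0, fmt.Errorf("bad max-age: %w", err)
		}
		maxAge = parsed
	}
	return maxAge, nil
}
```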
@@ -2513,7 +2523,9 @@ var cleanupHiddenHelp = fs.CommandHelp{
 Note that you can use --interactive/-i or --dry-run with this command to see what
 it would do.
 
-    rclone backend cleanup-hidden b2:bucket/path/to/dir
+` + "```console" + `
+rclone backend cleanup-hidden b2:bucket/path/to/dir
+` + "```" + `
 `,
 }
 
@@ -929,8 +929,10 @@ strings of the encoded results.
 
 Usage Example:
 
-    rclone backend encode crypt: file1 [file2...]
-    rclone rc backend/command command=encode fs=crypt: file1 [file2...]
+` + "```console" + `
+rclone backend encode crypt: file1 [file2...]
+rclone rc backend/command command=encode fs=crypt: file1 [file2...]
+` + "```" + `
 `,
 	},
 	{
@@ -942,8 +944,10 @@ inputs are invalid.
 
 Usage Example:
 
-    rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
-    rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
+` + "```console" + `
+rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
+rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
+` + "```" + `
 `,
 	},
 }
@@ -563,7 +563,11 @@ var commandHelp = []fs.CommandHelp{{
 	Short: "Show metadata about the DOI.",
 	Long: `This command returns a JSON object with some information about the DOI.
 
-    rclone backend medatadata doi:
+Usage example:
+
+` + "```console" + `
+rclone backend metadata doi:
+` + "```" + `
 
 It returns a JSON object representing metadata about the DOI.
 `,
@@ -573,11 +577,13 @@ It returns a JSON object representing metadata about the DOI.
 	Long: `This set command can be used to update the config parameters
 for a running doi backend.
 
-Usage Examples:
+Usage examples:
 
-    rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
+` + "```console" + `
+rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
+` + "```" + `
 
 The option keys are named as they are in the config file.
 
@@ -3669,8 +3669,10 @@ var commandHelp = []fs.CommandHelp{{
 
 Usage Examples:
 
-    rclone backend get drive: [-o service_account_file] [-o chunk_size]
-    rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
+` + "```console" + `
+rclone backend get drive: [-o service_account_file] [-o chunk_size]
+rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
+` + "```" + `
 `,
 	Opts: map[string]string{
 		"chunk_size": "show the current upload chunk size",
@@ -3683,8 +3685,10 @@ Usage Examples:
 
 Usage Examples:
 
-    rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
-    rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
+` + "```console" + `
+rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
+rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
+` + "```" + `
 `,
 	Opts: map[string]string{
 		"chunk_size": "update the current upload chunk size",
@@ -3697,8 +3701,10 @@ Usage Examples:
 
 Usage:
 
-    rclone backend shortcut drive: source_item destination_shortcut
-    rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
+` + "```console" + `
+rclone backend shortcut drive: source_item destination_shortcut
+rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
+` + "```" + `
 
 In the first example this creates a shortcut from the "source_item"
 which can be a file or a directory to the "destination_shortcut". The
@@ -3721,38 +3727,44 @@ account.
 
 Usage:
 
-    rclone backend [-o config] drives drive:
+` + "```console" + `
+rclone backend [-o config] drives drive:
+` + "```" + `
 
-This will return a JSON list of objects like this
+This will return a JSON list of objects like this:
 
-    [
-        {
-            "id": "0ABCDEF-01234567890",
-            "kind": "drive#teamDrive",
-            "name": "My Drive"
-        },
-        {
-            "id": "0ABCDEFabcdefghijkl",
-            "kind": "drive#teamDrive",
-            "name": "Test Drive"
-        }
-    ]
+` + "```json" + `
+[
+    {
+        "id": "0ABCDEF-01234567890",
+        "kind": "drive#teamDrive",
+        "name": "My Drive"
+    },
+    {
+        "id": "0ABCDEFabcdefghijkl",
+        "kind": "drive#teamDrive",
+        "name": "Test Drive"
+    }
+]
+` + "```" + `
 
 With the -o config parameter it will output the list in a format
 suitable for adding to a config file to make aliases for all the
 drives found and a combined drive.
 
-    [My Drive]
-    type = alias
-    remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
+` + "```ini" + `
+[My Drive]
+type = alias
+remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
 
 [Test Drive]
 type = alias
 remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
 
 [AllDrives]
 type = combine
 upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
+` + "```" + `
 
 Adding this to the rclone config file will cause those team drives to
 be accessible with the aliases shown. Any illegal characters will be
@@ -3768,20 +3780,24 @@ passed in recursively.
 
 Usage:
 
+` + "```console" + `
+rclone backend untrash drive:directory
+rclone backend --interactive untrash drive:directory subdir
+` + "```" + `
+
 This takes an optional directory to trash which make this easier to
 use via the API.
 
-    rclone backend untrash drive:directory
-    rclone backend --interactive untrash drive:directory subdir
-
 Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.
 
 Result:
 
-    {
-        "Untrashed": 17,
-        "Errors": 0
-    }
+` + "```json" + `
+{
+    "Untrashed": 17,
+    "Errors": 0
+}
+` + "```" + `
 `,
 }, {
 	Name: "copyid",
@@ -3790,8 +3806,10 @@ Result:
 
 Usage:
 
-    rclone backend copyid drive: ID path
-    rclone backend copyid drive: ID1 path1 ID2 path2
+` + "```console" + `
+rclone backend copyid drive: ID path
+rclone backend copyid drive: ID1 path1 ID2 path2
+` + "```" + `
 
 It copies the drive file with ID given to the path (an rclone path which
 will be passed internally to rclone copyto). The ID and path pairs can be
@@ -3813,8 +3831,10 @@ Use the --interactive/-i or --dry-run flag to see what would be copied before co
 
 Usage:
 
-    rclone backend moveid drive: ID path
-    rclone backend moveid drive: ID1 path1 ID2 path2
+` + "```console" + `
+rclone backend moveid drive: ID path
+rclone backend moveid drive: ID1 path1 ID2 path2
+` + "```" + `
 
 It moves the drive file with ID given to the path (an rclone path which
 will be passed internally to rclone moveto).
@@ -3841,40 +3861,49 @@ Use the --interactive/-i or --dry-run flag to see what would be moved beforehand
 
 Usage:
 
-    rclone backend query drive: query
+` + "```console" + `
+rclone backend query drive: query
+` + "```" + `
 
 The query syntax is documented at [Google Drive Search query terms and
 operators](https://developers.google.com/drive/api/guides/ref-search-terms).
 
 For example:
 
-    rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
+` + "```console" + `
+rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
+` + "```" + `
 
 If the query contains literal ' or \ characters, these need to be escaped with
 \ characters. "'" becomes "\'" and "\" becomes "\\\", for example to match a
 file named "foo ' \.txt":
 
-    rclone backend query drive: "name = 'foo \' \\\.txt'"
+` + "```console" + `
+rclone backend query drive: "name = 'foo \' \\\.txt'"
+` + "```" + `
 
 The result is a JSON array of matches, for example:
 
-    [
-        {
-            "createdTime": "2017-06-29T19:58:28.537Z",
-            "id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
-            "md5Checksum": "68518d16be0c6fbfab918be61d658032",
-            "mimeType": "text/plain",
-            "modifiedTime": "2024-02-02T10:40:02.874Z",
-            "name": "foo ' \\.txt",
-            "parents": [
-                "0BxAe_BCDE4zkFGZpcWJGek0xbzC"
-            ],
-            "resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
-            "sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
-            "size": "311",
-            "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
-        }
-    ]`,
+` + "```json" + `
+[
+    {
+        "createdTime": "2017-06-29T19:58:28.537Z",
+        "id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
+        "md5Checksum": "68518d16be0c6fbfab918be61d658032",
+        "mimeType": "text/plain",
+        "modifiedTime": "2024-02-02T10:40:02.874Z",
+        "name": "foo ' \\.txt",
+        "parents": [
+            "0BxAe_BCDE4zkFGZpcWJGek0xbzC"
+        ],
+        "resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
+        "sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
+        "size": "311",
+        "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
+    }
+]
+` + "```" + `
+`,
 }, {
 	Name: "rescue",
 	Short: "Rescue or delete any orphaned files.",
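The escaping rule for Drive query literals quoted in the hunk above is easy to get wrong by hand; a small helper that applies it mechanically might look like this (hypothetical helper for illustration, not part of rclone):

```go
package main

import (
	"fmt"
	"strings"
)

// escapeDriveQueryLiteral applies the rule from the help text above:
// backslash and single quote must each be preceded by a backslash
// inside a Drive query string literal. Hypothetical helper.
var escapeDriveQueryLiteral = strings.NewReplacer(`\`, `\\`, `'`, `\'`)

func main() {
	name := `foo ' \.txt`
	query := fmt.Sprintf("name = '%s'", escapeDriveQueryLiteral.Replace(name))
	fmt.Println(query) // name = 'foo \' \\.txt'
}
```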
@@ -3892,19 +3921,27 @@ This can be used in 3 ways.
 
 First, list all orphaned files
 
-    rclone backend rescue drive:
+` + "```console" + `
+rclone backend rescue drive:
+` + "```" + `
 
 Second rescue all orphaned files to the directory indicated
 
-    rclone backend rescue drive: "relative/path/to/rescue/directory"
+` + "```console" + `
+rclone backend rescue drive: "relative/path/to/rescue/directory"
+` + "```" + `
 
 e.g. To rescue all orphans to a directory called "Orphans" in the top level
 
-    rclone backend rescue drive: Orphans
+` + "```console" + `
+rclone backend rescue drive: Orphans
+` + "```" + `
 
 Third delete all orphaned files to the trash
 
-    rclone backend rescue drive: -o delete
+` + "```console" + `
+rclone backend rescue drive: -o delete
+` + "```" + `
 `,
 }}
 
@@ -46,7 +46,10 @@ var commandHelp = []fs.CommandHelp{{
 	Short: "Drop cache.",
 	Long: `Completely drop checksum cache.
 Usage Example:
-    rclone backend drop hasher:
+
+` + "```console" + `
+rclone backend drop hasher:
+` + "```" + `
 `,
 }, {
 	Name: "dump",
@@ -60,15 +63,23 @@ Usage Example:
 	Name: "import",
 	Short: "Import a SUM file.",
 	Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
-Usage Example:
-    rclone backend import hasher:subdir md5 /path/to/sum.md5
+
+Usage example:
+
+` + "```console" + `
+rclone backend import hasher:subdir md5 /path/to/sum.md5
+` + "```" + `
 `,
 }, {
 	Name: "stickyimport",
 	Short: "Perform fast import of a SUM file.",
 	Long: `Fill hash cache from a SUM file without verifying file fingerprints.
-Usage Example:
-    rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
+
+Usage example:
+
+` + "```console" + `
+rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
+` + "```" + `
 `,
 }}
 
@@ -724,9 +724,11 @@ for a running http backend.
 
 Usage Examples:
 
-    rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=remote: -o url=https://example.com
+` + "```console" + `
+rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=remote: -o url=https://example.com
+` + "```" + `
 
 The option keys are named as they are in the config file.
 
@@ -3,18 +3,18 @@
 package oracleobjectstorage
 
 import (
 	"context"
 	"fmt"
 	"sort"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
 
 	"github.com/oracle/oci-go-sdk/v65/common"
 	"github.com/oracle/oci-go-sdk/v65/objectstorage"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/operations"
 )
 
 // ------------------------------------------------------------
@@ -22,28 +22,34 @@ import (
 // ------------------------------------------------------------
 
 const (
 	operationRename        = "rename"
 	operationListMultiPart = "list-multipart-uploads"
 	operationCleanup       = "cleanup"
 	operationRestore       = "restore"
 )
 
 var commandHelp = []fs.CommandHelp{{
 	Name:  operationRename,
 	Short: "change the name of an object.",
 	Long: `This command can be used to rename a object.
 
 Usage Examples:
 
-    rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
+` + "```console" + `
+rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
+` + "```" + `
 `,
 	Opts: nil,
 }, {
 	Name:  operationListMultiPart,
 	Short: "List the unfinished multipart uploads.",
 	Long: `This command lists the unfinished multipart uploads in JSON format.
 
-    rclone backend list-multipart-uploads oos:bucket/path/to/object
+Usage Examples:
+
+` + "```console" + `
+rclone backend list-multipart-uploads oos:bucket/path/to/object
+` + "```" + `
 
 It returns a dictionary of buckets with values as lists of unfinished
 multipart uploads.
@@ -51,71 +57,86 @@ multipart uploads.
 You can call it with no bucket in which case it lists all bucket, with
 a bucket or with a bucket and path.
 
-{
-  "test-bucket": [
-    {
-      "namespace": "test-namespace",
-      "bucket": "test-bucket",
-      "object": "600m.bin",
-      "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
-      "timeCreated": "2022-07-29T06:21:16.595Z",
-      "storageTier": "Standard"
-    }
-  ]
+` + "```json" + `
+{
+  "test-bucket": [
+    {
+      "namespace": "test-namespace",
+      "bucket": "test-bucket",
+      "object": "600m.bin",
+      "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
+      "timeCreated": "2022-07-29T06:21:16.595Z",
+      "storageTier": "Standard"
+    }
+  ]
+}
+` + "```" + `
 `,
 }, {
 	Name:  operationCleanup,
 	Short: "Remove unfinished multipart uploads.",
 	Long: `This command removes unfinished multipart uploads of age greater than
 max-age which defaults to 24 hours.
 
 Note that you can use --interactive/-i or --dry-run with this command to see what
 it would do.
 
-    rclone backend cleanup oos:bucket/path/to/object
-    rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
+Usage Examples:
+
+` + "```console" + `
+rclone backend cleanup oos:bucket/path/to/object
+rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
+` + "```" + `
 
 Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
 `,
 	Opts: map[string]string{
 		"max-age": "Max age of upload to delete",
 	},
 }, {
 	Name:  operationRestore,
 	Short: "Restore objects from Archive to Standard storage.",
 	Long: `This command can be used to restore one or more objects from Archive to Standard storage.
 
 Usage Examples:
 
-    rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
-    rclone backend restore oos:bucket -o hours=HOURS
+` + "```console" + `
+rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
+rclone backend restore oos:bucket -o hours=HOURS
+` + "```" + `
 
 This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
 
-    rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
+` + "```console" + `
+rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
+` + "```" + `
 
 All the objects shown will be marked for restore, then
 
-    rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
+` + "```console" + `
+rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
+` + "```" + `
 
 It returns a list of status dictionaries with Object Name and Status
 keys. The Status will be "RESTORED"" if it was successful or an error message
 if not.
 
-    [
-        {
-            "Object": "test.txt"
-            "Status": "RESTORED",
-        },
-        {
-            "Object": "test/file4.txt"
-            "Status": "RESTORED",
-        }
-    ]
+` + "```json" + `
+[
+    {
+        "Object": "test.txt"
+        "Status": "RESTORED",
+    },
+    {
+        "Object": "test/file4.txt"
+        "Status": "RESTORED",
+    }
+]
+` + "```" + `
 `,
 	Opts: map[string]string{
 		"hours": "The number of hours for which this object will be restored. Default is 24 hrs.",
 	},
 },
 }
 
@@ -131,104 +152,104 @@ If it is a string or a []string it will be shown to the user
 otherwise it will be JSON encoded and shown to the user like that
 */
 func (f *Fs) Command(ctx context.Context, commandName string, args []string,
 	opt map[string]string) (result any, err error) {
 	// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
 	switch commandName {
 	case operationRename:
 		if len(args) < 2 {
 			return nil, fmt.Errorf("path to object or its new name to rename is empty")
 		}
 		remote := args[0]
 		newName := args[1]
 		return f.rename(ctx, remote, newName)
 	case operationListMultiPart:
 		return f.listMultipartUploadsAll(ctx)
 	case operationCleanup:
 		maxAge := 24 * time.Hour
 		if opt["max-age"] != "" {
 			maxAge, err = fs.ParseDuration(opt["max-age"])
 			if err != nil {
 				return nil, fmt.Errorf("bad max-age: %w", err)
 			}
 		}
 		return nil, f.cleanUp(ctx, maxAge)
 	case operationRestore:
 		return f.restore(ctx, opt)
 	default:
 		return nil, fs.ErrorCommandNotFound
 	}
 }
 
 func (f *Fs) rename(ctx context.Context, remote, newName string) (any, error) {
 	if remote == "" {
 		return nil, fmt.Errorf("path to object file cannot be empty")
 	}
 	if newName == "" {
 		return nil, fmt.Errorf("the object's new name cannot be empty")
 	}
 	o := &Object{
 		fs:     f,
 		remote: remote,
 	}
 	bucketName, objectPath := o.split()
 	err := o.readMetaData(ctx)
 	if err != nil {
 		fs.Errorf(f, "failed to read object:%v %v ", objectPath, err)
 		if strings.HasPrefix(objectPath, bucketName) {
 			fs.Errorf(f, "warn: ensure object path: %v is relative to bucket:%v and doesn't include the bucket name",
 				objectPath, bucketName)
 		}
 		return nil, fs.ErrorNotAFile
 	}
 	details := objectstorage.RenameObjectDetails{
 		SourceName: common.String(objectPath),
 		NewName:    common.String(newName),
 	}
 	request := objectstorage.RenameObjectRequest{
 		NamespaceName:       common.String(f.opt.Namespace),
 		BucketName:          common.String(bucketName),
 		RenameObjectDetails: details,
 		OpcClientRequestId:  nil,
 		RequestMetadata:     common.RequestMetadata{},
 	}
 	var response objectstorage.RenameObjectResponse
 	err = f.pacer.Call(func() (bool, error) {
 		response, err = f.srv.RenameObject(ctx, request)
 		return shouldRetry(ctx, response.HTTPResponse(), err)
 	})
 	if err != nil {
 		return nil, err
 	}
 	fs.Infof(f, "success: renamed object-path: %v to %v", objectPath, newName)
 	return "renamed successfully", nil
 }
 
 func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*objectstorage.MultipartUpload,
 	err error) {
 	uploadsMap = make(map[string][]*objectstorage.MultipartUpload)
 	bucket, directory := f.split("")
 	if bucket != "" {
 		uploads, err := f.listMultipartUploads(ctx, bucket, directory)
 		if err != nil {
 			return uploadsMap, err
 		}
 		uploadsMap[bucket] = uploads
 		return uploadsMap, nil
 	}
 	entries, err := f.listBuckets(ctx)
 	if err != nil {
 		return uploadsMap, err
 	}
 	for _, entry := range entries {
 		bucket := entry.Remote()
 		uploads, listErr := f.listMultipartUploads(ctx, bucket, "")
 		if listErr != nil {
 			err = listErr
 			fs.Errorf(f, "%v", err)
 		}
 		uploadsMap[bucket] = uploads
 	}
 	return uploadsMap, err
 }
 
 // listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
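The doc comment at the top of this hunk states the Command result contract: strings and string slices are shown to the user as-is, anything else is JSON encoded. A sketch of a caller honouring that contract (invented names; the real plumbing lives in rclone's command layer):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// showResult sketches the contract from the Command doc comment above:
// string and []string results are printed directly, everything else is
// JSON encoded. Illustrative only, not rclone's actual implementation.
func showResult(result any) error {
	switch v := result.(type) {
	case string:
		fmt.Println(v)
	case []string:
		for _, s := range v {
			fmt.Println(s)
		}
	default:
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", "  ")
		return enc.Encode(v)
	}
	return nil
}

func main() {
	_ = showResult("renamed successfully")                       // printed verbatim
	_ = showResult(map[string]int{"Untrashed": 17, "Errors": 0}) // JSON encoded
}
```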
@@ -237,8 +258,8 @@ func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string
 // directories and objects. This could surprise the user if they ask
 // for "dir" and it returns "dirKey"
 func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
 	uploads []*objectstorage.MultipartUpload, err error) {
 	return f.listMultipartUploadsObject(ctx, bucketName, directory, false)
 }
 
 // listMultipartUploads finds first outstanding multipart uploads for (bucket, key)
@@ -247,147 +268,147 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory str
 // directories and objects. This could surprise the user if they ask
 // for "dir" and it returns "dirKey"
 func (f *Fs) findLatestMultipartUpload(ctx context.Context, bucketName, directory string) (
 	uploads []*objectstorage.MultipartUpload, err error) {
 	pastUploads, err := f.listMultipartUploadsObject(ctx, bucketName, directory, true)
 	if err != nil {
 		return nil, err
 	}
 
 	if len(pastUploads) > 0 {
 		sort.Slice(pastUploads, func(i, j int) bool {
 			return pastUploads[i].TimeCreated.After(pastUploads[j].TimeCreated.Time)
 		})
 		return pastUploads[:1], nil
 	}
 	return nil, err
 }
 
 func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directory string, exact bool) (
 	uploads []*objectstorage.MultipartUpload, err error) {
 
 	uploads = []*objectstorage.MultipartUpload{}
 	req := objectstorage.ListMultipartUploadsRequest{
 		NamespaceName: common.String(f.opt.Namespace),
 		BucketName:    common.String(bucketName),
 	}
 
 	var response objectstorage.ListMultipartUploadsResponse
 	for {
 		err = f.pacer.Call(func() (bool, error) {
 			response, err = f.srv.ListMultipartUploads(ctx, req)
 			return shouldRetry(ctx, response.HTTPResponse(), err)
 		})
 		if err != nil {
 			// fs.Debugf(f, "failed to list multi part uploads %v", err)
 			return uploads, err
 		}
 		for index, item := range response.Items {
 			if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
 				continue
 			}
 			if exact {
 				if *item.Object == directory {
 					uploads = append(uploads, &response.Items[index])
 				}
 			} else {
 				uploads = append(uploads, &response.Items[index])
 			}
 		}
 		if response.OpcNextPage == nil {
 			break
 		}
 		req.Page = response.OpcNextPage
 	}
 	return uploads, nil
 }
 
 func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPath string, uploadID string) (
 	uploadedParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
 	uploadedParts = make(map[int]objectstorage.MultipartUploadPartSummary)
 	req := objectstorage.ListMultipartUploadPartsRequest{
 		NamespaceName: common.String(f.opt.Namespace),
 		BucketName:    common.String(bucketName),
 		ObjectName:    common.String(bucketPath),
 		UploadId:      common.String(uploadID),
 		Limit:         common.Int(1000),
 	}
 
 	var response objectstorage.ListMultipartUploadPartsResponse
 	for {
 		err = f.pacer.Call(func() (bool, error) {
 			response, err = f.srv.ListMultipartUploadParts(ctx, req)
 			return shouldRetry(ctx, response.HTTPResponse(), err)
 		})
 		if err != nil {
 			return uploadedParts, err
 		}
 		for _, item := range response.Items {
 			uploadedParts[*item.PartNumber] = item
 		}
 		if response.OpcNextPage == nil {
 			break
 		}
 		req.Page = response.OpcNextPage
 	}
 	return uploadedParts, nil
 }
 
 func (f *Fs) restore(ctx context.Context, opt map[string]string) (any, error) {
 	req := objectstorage.RestoreObjectsRequest{
 		NamespaceName:         common.String(f.opt.Namespace),
 		RestoreObjectsDetails: objectstorage.RestoreObjectsDetails{},
 	}
 	if hours := opt["hours"]; hours != "" {
 		ihours, err := strconv.Atoi(hours)
 		if err != nil {
 			return nil, fmt.Errorf("bad value for hours: %w", err)
 		}
 		req.RestoreObjectsDetails.Hours = &ihours
 	}
 	type status struct {
 		Object string
 		Status string
 	}
 	var (
 		outMu sync.Mutex
 		out   = []status{}
 		err   error
 	)
 	err = operations.ListFn(ctx, f, func(obj fs.Object) {
 		// Remember this is run --checkers times concurrently
 		o, ok := obj.(*Object)
 		st := status{Object: obj.Remote(), Status: "RESTORED"}
 		defer func() {
 			outMu.Lock()
 			out = append(out, st)
 			outMu.Unlock()
 		}()
 		if !ok {
 			st.Status = "Not an OCI Object Storage object"
 			return
 		}
 		if o.storageTier == nil || (*o.storageTier != "archive") {
 			st.Status = "Object not in Archive storage tier"
 			return
 		}
 		if operations.SkipDestructive(ctx, obj, "restore") {
 			return
 		}
 		bucket, bucketPath := o.split()
 		reqCopy := req
 		reqCopy.BucketName = &bucket
 		reqCopy.ObjectName = &bucketPath
 		var response objectstorage.RestoreObjectsResponse
 		err = f.pacer.Call(func() (bool, error) {
 			response, err = f.srv.RestoreObjects(ctx, reqCopy)
 			return shouldRetry(ctx, response.HTTPResponse(), err)
 		})
 		if err != nil {
 			st.Status = err.Error()
 		}
 	})
 	if err != nil {
 		return out, err
 	}
 	return out, nil
 }
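Every API call in the code above goes through f.pacer.Call with a callback returning (retry, err). The control flow that implies is simple; here is a stripped-down stand-in showing only that contract (not rclone's fs.Pacer, which also rate-limits and backs off adaptively):

```go
package main

import "time"

// pacer is an illustrative stand-in for the f.pacer used above: the
// callback returns (retry, err) and Call re-invokes it while retry is
// true. The real pacer paces calls and uses adaptive backoff.
type pacer struct{ sleep time.Duration }

func (p *pacer) Call(fn func() (bool, error)) error {
	for {
		retry, err := fn()
		if !retry {
			return err
		}
		time.Sleep(p.sleep)
	}
}

func main() {
	p := &pacer{sleep: 100 * time.Millisecond}
	attempts := 0
	_ = p.Call(func() (bool, error) {
		attempts++
		return attempts < 3, nil // retry twice, then succeed
	})
}
```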
@@ -1683,7 +1683,9 @@ var commandHelp = []fs.CommandHelp{{
 
 Usage:
 
-    rclone backend addurl pikpak:dirpath url
+` + "```console" + `
+rclone backend addurl pikpak:dirpath url
+` + "```" + `
 
 Downloads will be stored in 'dirpath'. If 'dirpath' is invalid,
 download will fallback to default 'My Pack' folder.
@@ -1695,8 +1697,10 @@ download will fallback to default 'My Pack' folder.
 
 Usage:
 
-    rclone backend decompress pikpak:dirpath {filename} -o password=password
-    rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
+` + "```console" + `
+rclone backend decompress pikpak:dirpath {filename} -o password=password
+rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
+` + "```" + `
 
 An optional argument 'filename' can be specified for a file located in
 'pikpak:dirpath'. You may want to pass '-o password=password' for a
@@ -1705,11 +1709,13 @@ source files after decompression finished.
 
 Result:
 
-    {
-        "Decompressed": 17,
-        "SourceDeleted": 0,
-        "Errors": 0
-    }
+` + "```json" + `
+{
+    "Decompressed": 17,
+    "SourceDeleted": 0,
+    "Errors": 0
+}
+` + "```" + `
 `,
 }}
 

backend/s3/s3.go (182 changed lines)
@@ -2908,34 +2908,41 @@ or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Fre
 
 Usage Examples:
 
-    rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
-    rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
-    rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
-    rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
+` + "```console" + `
+rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
+rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
+rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
+rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
+` + "```" + `
 
 This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
 
-    rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
+` + "```console" + `
+rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
+` + "```" + `
 
 All the objects shown will be marked for restore, then
 
-    rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
+` + "```console" + `
+rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
+` + "```" + `
 
 It returns a list of status dictionaries with Remote and Status
 keys. The Status will be OK if it was successful or an error message
 if not.
 
-    [
-        {
-            "Status": "OK",
-            "Remote": "test.txt"
-        },
-        {
-            "Status": "OK",
-            "Remote": "test/file4.txt"
-        }
-    ]
+` + "```json" + `
+[
+    {
+        "Status": "OK",
+        "Remote": "test.txt"
+    },
+    {
+        "Status": "OK",
+        "Remote": "test/file4.txt"
+    }
+]
+` + "```" + `
 `,
 	Opts: map[string]string{
 		"priority": "Priority of restore: Standard|Expedited|Bulk",
@@ -2950,43 +2957,47 @@ or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Fre
 
 Usage Examples:
 
-    rclone backend restore-status s3:bucket/path/to/object
-    rclone backend restore-status s3:bucket/path/to/directory
-    rclone backend restore-status -o all s3:bucket/path/to/directory
+` + "```console" + `
+rclone backend restore-status s3:bucket/path/to/object
+rclone backend restore-status s3:bucket/path/to/directory
+rclone backend restore-status -o all s3:bucket/path/to/directory
+` + "```" + `
 
 This command does not obey the filters.
 
 It returns a list of status dictionaries.
 
-    [
-        {
-            "Remote": "file.txt",
-            "VersionID": null,
-            "RestoreStatus": {
-                "IsRestoreInProgress": true,
-                "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
-            },
-            "StorageClass": "GLACIER"
-        },
-        {
-            "Remote": "test.pdf",
-            "VersionID": null,
-            "RestoreStatus": {
-                "IsRestoreInProgress": false,
-                "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
-            },
-            "StorageClass": "DEEP_ARCHIVE"
-        },
-        {
-            "Remote": "test.gz",
-            "VersionID": null,
-            "RestoreStatus": {
-                "IsRestoreInProgress": true,
-                "RestoreExpiryDate": "null"
-            },
-            "StorageClass": "INTELLIGENT_TIERING"
-        }
-    ]
+` + "```json" + `
+[
+    {
+        "Remote": "file.txt",
+        "VersionID": null,
+        "RestoreStatus": {
+            "IsRestoreInProgress": true,
+            "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
+        },
+        "StorageClass": "GLACIER"
+    },
+    {
+        "Remote": "test.pdf",
+        "VersionID": null,
+        "RestoreStatus": {
+            "IsRestoreInProgress": false,
+            "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
+        },
+        "StorageClass": "DEEP_ARCHIVE"
+    },
+    {
+        "Remote": "test.gz",
+        "VersionID": null,
+        "RestoreStatus": {
+            "IsRestoreInProgress": true,
+            "RestoreExpiryDate": "null"
+        },
+        "StorageClass": "INTELLIGENT_TIERING"
+    }
+]
+` + "```" + `
 `,
 	Opts: map[string]string{
 		"all": "if set then show all objects, not just ones with restore status",
@@ -2996,7 +3007,11 @@ It returns a list of status dictionaries.
 	Short: "List the unfinished multipart uploads.",
 	Long: `This command lists the unfinished multipart uploads in JSON format.
 
-    rclone backend list-multipart s3:bucket/path/to/object
+Usage Examples:
+
+` + "```console" + `
+rclone backend list-multipart s3:bucket/path/to/object
+` + "```" + `
 
 It returns a dictionary of buckets with values as lists of unfinished
 multipart uploads.
@@ -3004,27 +3019,28 @@ multipart uploads.
 You can call it with no bucket in which case it lists all bucket, with
 a bucket or with a bucket and path.
 
-    {
-      "rclone": [
+` + "```json" + `
+{
+  "rclone": [
     {
       "Initiated": "2020-06-26T14:20:36Z",
       "Initiator": {
         "DisplayName": "XXX",
         "ID": "arn:aws:iam::XXX:user/XXX"
       },
       "Key": "KEY",
       "Owner": {
         "DisplayName": null,
         "ID": "XXX"
      },
      "StorageClass": "STANDARD",
      "UploadId": "XXX"
    }
  ],
  "rclone-1000files": [],
  "rclone-dst": []
 }
+` + "```" + `
 `,
 }, {
 	Name: "cleanup",
@@ -3035,8 +3051,12 @@ max-age which defaults to 24 hours.
 Note that you can use --interactive/-i or --dry-run with this command to see what
 it would do.
 
-    rclone backend cleanup s3:bucket/path/to/object
-    rclone backend cleanup -o max-age=7w s3:bucket/path/to/object
+Usage Examples:
+
+` + "```console" + `
+rclone backend cleanup s3:bucket/path/to/object
+rclone backend cleanup -o max-age=7w s3:bucket/path/to/object
+` + "```" + `
 
 Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
 `,
@@ -3052,7 +3072,11 @@ on a versions enabled bucket.
 Note that you can use --interactive/-i or --dry-run with this command to see what
 it would do.
 
-    rclone backend cleanup-hidden s3:bucket/path/to/dir
+Usage Examples:
+
+` + "```console" + `
+rclone backend cleanup-hidden s3:bucket/path/to/dir
+` + "```" + `
 `,
 }, {
 	Name: "versioning",
@@ -3061,9 +3085,13 @@ it would do.
 passed and then returns the current versioning status for the bucket
 supplied.
 
-    rclone backend versioning s3:bucket # read status only
-    rclone backend versioning s3:bucket Enabled
-    rclone backend versioning s3:bucket Suspended
+Usage Examples:
+
+` + "```console" + `
+rclone backend versioning s3:bucket # read status only
+rclone backend versioning s3:bucket Enabled
+rclone backend versioning s3:bucket Suspended
+` + "```" + `
 
 It may return "Enabled", "Suspended" or "Unversioned". Note that once versioning
 has been enabled the status can't be set back to "Unversioned".
@@ -3076,9 +3104,11 @@ for a running s3 backend.
 
 Usage Examples:
 
-    rclone backend set s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=s3: -o session_token=X -o access_key_id=X -o secret_access_key=X
+` + "```console" + `
+rclone backend set s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=s3: -o session_token=X -o access_key_id=X -o secret_access_key=X
+` + "```" + `
 
 The option keys are named as they are in the config file.
 