Mirror of https://github.com/rclone/rclone.git (synced 2026-02-01 09:13:25 +00:00)

Compare commits: v1.59.2...fix-6433-w (92 commits)
Commits (SHA1):

f4d822626e
be53dcc9c9
bd787e8f45
3cb7734eac
d08ed7d1e9
f279e4ab01
35349657cd
ce3b65e6dc
0008cb4934
2ea5b4f0b8
b5818454f7
555def2da7
02b7613104
b342c6cf9c
8a6857c295
21fd13f10d
5cc7797f9e
8bf2d6b6c8
85eb9776bd
47539ec0e6
58b327a9f6
1107da7247
8d1fff9a82
2c5923ab1a
1ad22b8881
0501773db1
cb8842941b
5439a2c5c6
d347ac0154
9f33eb2e65
fe801b8fef
6b158f33a3
5a6d233924
df513ca90a
49bb640bae
98fd00a655
16039b350d
ebe86c6cec
1f5e7ce598
4b981100db
4344a3e2ea
1542a979f9
81d242473a
0ae171416f
a59fa2977d
7243918069
fa49971d49
77e3512714
337b43e7e4
6fd9e3d717
876f791ecd
918bd6d3c3
f49be033c6
2a817e21cb
a07d376fb1
e749bc58f4
821e084f28
2170376d1b
8125b1cf08
ba60984f33
a875320e37
639624184d
fe84cca1ad
9d3958bd0b
3a8e52de74
72227a0151
9f40cb114a
2f461f13e3
7a24c173f6
fb60aeddae
695736d1e4
f0396070eb
f1166757ba
9b76434ad5
440d0cd179
a047d30eca
03d0f331f7
049674aeab
50f053cada
140af43c26
f467188876
4a4379b312
8c02fe7b89
11be920e90
8c19b355a5
67fd60275a
b310490fa5
0ee0812a2b
55bbff6346
9c6cfc1ff0
f753d7cd42
f5be1d6b65
.github/workflows/build.yml (vendored, 35 changes)
@@ -25,12 +25,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.16', 'go1.17']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.18.x'
+            go: '1.19.x'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -39,9 +39,16 @@ jobs:
             librclonetest: true
             deploy: true

+          - job_name: linux_386
+            os: ubuntu-latest
+            go: '1.19.x'
+            goarch: 386
+            gotags: cmount
+            quicktest: true
+
           - job_name: mac_amd64
             os: macos-11
-            go: '1.18.x'
+            go: '1.19.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -50,14 +57,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-11
-            go: '1.18.x'
+            go: '1.19.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '1.18.x'
+            go: '1.19.x'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -67,23 +74,23 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '1.18.x'
+            go: '1.19.x'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.16
-            os: ubuntu-latest
-            go: '1.16.x'
-            quicktest: true
-            racequicktest: true
-
           - job_name: go1.17
             os: ubuntu-latest
             go: '1.17.x'
             quicktest: true
             racequicktest: true

+          - job_name: go1.18
+            os: ubuntu-latest
+            go: '1.18.x'
+            quicktest: true
+            racequicktest: true
+
     name: ${{ matrix.job_name }}

     runs-on: ${{ matrix.os }}
@@ -222,7 +229,7 @@ jobs:
         uses: actions/checkout@v2

       - name: Code quality test
-        uses: golangci/golangci-lint-action@v2
+        uses: golangci/golangci-lint-action@v3
         with:
           # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
           version: latest
@@ -243,7 +250,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v1
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x

       - name: Go module cache
         uses: actions/cache@v2
@@ -4,8 +4,8 @@ linters:
   enable:
     - deadcode
     - errcheck
-    #- goimports
-    #- revive
+    - goimports
+    - revive
     - ineffassign
     - structcheck
     - varcheck
@@ -77,7 +77,7 @@ Make sure you

 * Add [documentation](#writing-documentation) for a new feature.
 * [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).

-When you are done with that push your changes to Github:
+When you are done with that push your changes to GitHub:

     git push -u origin my-new-feature

@@ -88,7 +88,7 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I

 You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).

-## Using Git and Github ##
+## Using Git and GitHub ##

 ### Committing your changes ###
MANUAL.html (generated, 124 changes)
@@ -19,7 +19,7 @@
 <header id="title-block-header">
 <h1 class="title">rclone(1) User Manual</h1>
 <p class="author">Nick Craig-Wood</p>
-<p class="date">Sep 15, 2022</p>
+<p class="date">Jul 09, 2022</p>
 </header>
 <h1 id="rclone-syncs-your-files-to-cloud-storage">Rclone syncs your files to cloud storage</h1>
 <p><img width="50%" src="https://rclone.org/img/logo_on_light__horizontal_color.svg" alt="rclone logo" style="float:right; padding: 5px;" ></p>
@@ -300,7 +300,7 @@ go build</code></pre>
 <p>Run the <a href="https://rclone.org/commands/rclone_config_paths/">config paths</a> command to see the locations that rclone will use.</p>
 <p>To override them set the corresponding options (as command-line arguments, or as <a href="https://rclone.org/docs/#environment-variables">environment variables</a>): - <a href="https://rclone.org/docs/#config-config-file">--config</a> - <a href="https://rclone.org/docs/#cache-dir-dir">--cache-dir</a> - <a href="https://rclone.org/docs/#temp-dir-dir">--temp-dir</a></p>
 <h2 id="autostart">Autostart</h2>
-<p>After installing and configuring rclone, as described above, you are ready to use rclone as an interactive command line utility. If your goal is to perform <em>periodic</em> operations, such as a regular <a href="https://rclone.org/commands/rclone_sync/">sync</a>, you will probably want to configure your rclone command in your operating system's scheduler. If you need to expose <em>service</em>-like features, such as <a href="https://rclone.org/rc/">remote control</a>, <a href="https://rclone.org/gui/">GUI</a>, <a href="https://rclone.org/commands/rclone_serve/">serve</a> or <a href="https://rclone.org/commands/rclone_mount/">mount</a>, you will often want an rclone command always running in the background, and configuring it to run in a service infrastructure may be a better option. Below are some alternatives on how to achieve this on different operating systems.</p>
+<p>After installing and configuring rclone, as described above, you are ready to use rclone as an interactive command line utility. If your goal is to perform <em>periodic</em> operations, such as a regular <a href="https://rclone.org/commands/rclone_sync/">sync</a>, you will probably want to configure your rclone command in your operating system's scheduler. If you need to expose <em>service</em>-like features, such as <a href="https://rclone.org/rc/">remote control</a>, <a href="https://rclone.org/gui/">GUI</a>, <a href="https://rclone.org/commands/rclone_serve/">serve</a> or <a href="https://rclone.org/commands/rclone_move/">mount</a>, you will often want an rclone command always running in the background, and configuring it to run in a service infrastructure may be a better option. Below are some alternatives on how to achieve this on different operating systems.</p>
 <p>NOTE: Before setting up autorun it is highly recommended that you have tested your command manually from a Command Prompt first.</p>
 <h3 id="autostart-on-windows">Autostart on Windows</h3>
 <p>The most relevant alternatives for autostart on Windows are: - Run at user log on using the Startup folder - Run at user log on, at system startup or at schedule using Task Scheduler - Run at system startup using Windows service</p>
@@ -309,7 +309,7 @@ go build</code></pre>
 <p>Example command to run a sync in background:</p>
 <pre><code>c:\rclone\rclone.exe sync c:\files remote:/files --no-console --log-file c:\rclone\logs\sync_files.txt</code></pre>
 <h4 id="user-account">User account</h4>
-<p>As mentioned in the <a href="https://rclone.org/commands/rclone_mount/">mount</a> documentation, mounted drives created as Administrator are not visible to other accounts, not even the account that was elevated as Administrator. By running the mount command as the built-in <code>SYSTEM</code> user account, it will create drives accessible for everyone on the system. Both scheduled task and Windows service can be used to achieve this.</p>
+<p>As mentioned in the <a href="https://rclone.org/commands/rclone_move/">mount</a> documentation, mounted drives created as Administrator are not visible to other accounts, not even the account that was elevated as Administrator. By running the mount command as the built-in <code>SYSTEM</code> user account, it will create drives accessible for everyone on the system. Both scheduled task and Windows service can be used to achieve this.</p>
 <p>NOTE: Remember that when rclone runs as the <code>SYSTEM</code> user, the user profile that it sees will not be yours. This means that if you normally run rclone with configuration file in the default location, to be able to use the same configuration when running as the system user you must explicitely tell rclone where to find it with the <a href="https://rclone.org/docs/#config-config-file"><code>--config</code></a> option, or else it will look in the system users profile path (<code>C:\Windows\System32\config\systemprofile</code>). To test your command manually from a Command Prompt, you can run it with the <a href="https://docs.microsoft.com/en-us/sysinternals/downloads/psexec">PsExec</a> utility from Microsoft's Sysinternals suite, which takes option <code>-s</code> to execute commands as the <code>SYSTEM</code> user.</p>
 <h4 id="start-from-startup-folder">Start from Startup folder</h4>
 <p>To quickly execute an rclone command you can simply create a standard Windows Explorer shortcut for the complete rclone command you want to run. If you store this shortcut in the special "Startup" start-menu folder, Windows will automatically run it at login. To open this folder in Windows Explorer, enter path <code>%APPDATA%\Microsoft\Windows\Start Menu\Programs\Startup</code>, or <code>C:\ProgramData\Microsoft\Windows\Start Menu\Programs\StartUp</code> if you want the command to start for <em>every</em> user that logs in.</p>
@@ -469,7 +469,6 @@ destpath/sourcepath/two.txt</code></pre>
 <p>Note that files in the destination won't be deleted if there were any errors at any point. Duplicate objects (files with the same name, on those providers that support it) are also not yet handled.</p>
 <p>It is always the contents of the directory that is synced, not the directory itself. So when source:path is a directory, it's the contents of source:path that are copied, not the directory name and contents. See extended explanation in the <a href="https://rclone.org/commands/rclone_copy/">copy</a> command if unsure.</p>
 <p>If dest:path doesn't exist, it is created and the source:path contents go there.</p>
-<p>It is not possible to sync overlapping remotes. However, you may exclude the destination from the sync with a filter rule or by putting an exclude-if-present file inside the destination directory and sync to a destination that is inside the source directory.</p>
 <p><strong>Note</strong>: Use the <code>-P</code>/<code>--progress</code> flag to view real-time transfer statistics</p>
 <p><strong>Note</strong>: Use the <code>rclone dedupe</code> command to deal with "Duplicate object/directory found in source/destination - ignoring" errors. See <a href="https://forum.rclone.org/t/sync-not-clearing-duplicates/14372">this forum post</a> for more info.</p>
 <pre><code>rclone sync source:path dest:path [flags]</code></pre>
@@ -4237,7 +4236,7 @@ rclone sync -i /path/to/files remote:current-backup</code></pre>
 <h3 id="backup-dirdir">--backup-dir=DIR</h3>
 <p>When using <code>sync</code>, <code>copy</code> or <code>move</code> any files which would have been overwritten or deleted are moved in their original hierarchy into this directory.</p>
 <p>If <code>--suffix</code> is set, then the moved files will have the suffix added to them. If there is a file with the same path (after the suffix has been added) in DIR, then it will be overwritten.</p>
-<p>The remote in use must support server-side move or copy and you must use the same remote as the destination of the sync. The backup directory must not overlap the destination directory without it being excluded by a filter rule.</p>
+<p>The remote in use must support server-side move or copy and you must use the same remote as the destination of the sync. The backup directory must not overlap the destination directory.</p>
 <p>For example</p>
 <pre><code>rclone sync -i /path/to/local remote:current --backup-dir remote:old</code></pre>
 <p>will sync <code>/path/to/local</code> to <code>remote:current</code>, but for any files which would have been updated or deleted will be stored in <code>remote:old</code>.</p>
@@ -8379,7 +8378,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
 --use-json-log Use json log format
 --use-mmap Use mmap allocator (see docs)
 --use-server-modtime Use server modified time instead of object metadata
---user-agent string Set the user-agent to a specified string (default "rclone/v1.59.2")
+--user-agent string Set the user-agent to a specified string (default "rclone/v1.59.0")
 -v, --verbose count Print lots more stuff (repeat for more)</code></pre>
 <h2 id="backend-flags">Backend Flags</h2>
 <p>These flags are available for every command. They control the backends and may be set in the config file.</p>
@@ -17157,7 +17156,7 @@ remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:

 [AllDrives]
 type = combine
-upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"</code></pre>
+remote = "My Drive=My Drive:" "Test Drive=Test Drive:"</code></pre>
 <p>If you then add that config to your config file (find it with <code>rclone config file</code>) then you can access all the shared drives in one place with the <code>AllDrives:</code> remote.</p>
 <p>See <a href="https://rclone.org/drive/#drives">the Google Drive docs</a> for full info.</p>
 <h3 id="standard-options-11">Standard options</h3>
@@ -19658,7 +19657,7 @@ remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:

 [AllDrives]
 type = combine
-upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"</code></pre>
+remote = "My Drive=My Drive:" "Test Drive=Test Drive:"</code></pre>
 <p>Adding this to the rclone config file will cause those team drives to be accessible with the aliases shown. Any illegal charactes will be substituted with "_" and duplicate names will have numbers suffixed. It will also add a remote called AllDrives which shows all the shared drives combined into one directory tree.</p>
 <h3 id="untrash">untrash</h3>
 <p>Untrash files and directories</p>
@@ -20953,8 +20952,8 @@ y/e/d> y</code></pre>
 <p>The Internet Archive backend utilizes Items on <a href="https://archive.org/">archive.org</a></p>
 <p>Refer to <a href="https://archive.org/services/docs/api/ias3.html">IAS3 API documentation</a> for the API this backend uses.</p>
 <p>Paths are specified as <code>remote:bucket</code> (or <code>remote:</code> for the <code>lsd</code> command.) You may put subdirectories in too, e.g. <code>remote:item/path/to/dir</code>.</p>
-<p>Once you have made a remote (see the provider specific section above) you can use it like this:</p>
+<p>Unlike S3, listing up all items uploaded by you isn't supported.</p>
+<p>Once you have made a remote, you can use it like this:</p>
 <p>Make a new item</p>
 <pre><code>rclone mkdir remote:item</code></pre>
 <p>List the contents of a item</p>
@@ -20966,7 +20965,7 @@ y/e/d> y</code></pre>
 <p>You can optionally wait for the server's processing to finish, by setting non-zero value to <code>wait_archive</code> key. By making it wait, rclone can do normal file comparison. Make sure to set a large enough value (e.g. <code>30m0s</code> for smaller files) as it can take a long time depending on server's queue.</p>
 <h2 id="about-metadata">About metadata</h2>
 <p>This backend supports setting, updating and reading metadata of each file. The metadata will appear as file metadata on Internet Archive. However, some fields are reserved by both Internet Archive and rclone.</p>
-<p>The following are reserved by Internet Archive: - <code>name</code> - <code>source</code> - <code>size</code> - <code>md5</code> - <code>crc32</code> - <code>sha1</code> - <code>format</code> - <code>old_version</code> - <code>viruscheck</code> - <code>summation</code></p>
+<p>The following are reserved by Internet Archive: - <code>name</code> - <code>source</code> - <code>size</code> - <code>md5</code> - <code>crc32</code> - <code>sha1</code> - <code>format</code> - <code>old_version</code> - <code>viruscheck</code></p>
 <p>Trying to set values to these keys is ignored with a warning. Only setting <code>mtime</code> is an exception. Doing so make it the identical behavior as setting ModTime.</p>
 <p>rclone reserves all the keys starting with <code>rclone-</code>. Setting value for these keys will give you warnings, but values are set according to request.</p>
 <p>If there are multiple values for a key, only the first one is returned. This is a limitation of rclone, that supports one value per one key. It can be triggered when you did a server-side copy.</p>
@@ -21139,42 +21138,42 @@ y/e/d> y</code></pre>
 <td>CRC32 calculated by Internet Archive</td>
 <td>string</td>
 <td>01234567</td>
-<td><strong>Y</strong></td>
+<td>N</td>
 </tr>
 <tr class="even">
 <td>format</td>
 <td>Name of format identified by Internet Archive</td>
 <td>string</td>
 <td>Comma-Separated Values</td>
-<td><strong>Y</strong></td>
+<td>N</td>
 </tr>
 <tr class="odd">
 <td>md5</td>
 <td>MD5 hash calculated by Internet Archive</td>
 <td>string</td>
 <td>01234567012345670123456701234567</td>
-<td><strong>Y</strong></td>
+<td>N</td>
 </tr>
 <tr class="even">
 <td>mtime</td>
 <td>Time of last modification, managed by Rclone</td>
 <td>RFC 3339</td>
 <td>2006-01-02T15:04:05.999999999Z</td>
-<td><strong>Y</strong></td>
+<td>N</td>
 </tr>
 <tr class="odd">
 <td>name</td>
 <td>Full file path, without the bucket part</td>
 <td>filename</td>
 <td>backend/internetarchive/internetarchive.go</td>
-<td><strong>Y</strong></td>
+<td>N</td>
 </tr>
 <tr class="even">
 <td>old_version</td>
 <td>Whether the file was replaced and moved by keep-old-version flag</td>
 <td>boolean</td>
 <td>true</td>
-<td><strong>Y</strong></td>
+<td>N</td>
 </tr>
 <tr class="odd">
 <td>rclone-ia-mtime</td>
@@ -21202,35 +21201,28 @@ y/e/d> y</code></pre>
 <td>SHA1 hash calculated by Internet Archive</td>
 <td>string</td>
 <td>0123456701234567012345670123456701234567</td>
-<td><strong>Y</strong></td>
+<td>N</td>
 </tr>
 <tr class="odd">
 <td>size</td>
 <td>File size in bytes</td>
 <td>decimal number</td>
 <td>123456</td>
-<td><strong>Y</strong></td>
+<td>N</td>
 </tr>
 <tr class="even">
 <td>source</td>
 <td>The source of the file</td>
 <td>string</td>
 <td>original</td>
-<td><strong>Y</strong></td>
+<td>N</td>
 </tr>
-<tr class="odd">
-<td>summation</td>
-<td>Check https://forum.rclone.org/t/31922 for how it is used</td>
-<td>string</td>
-<td>md5</td>
-<td><strong>Y</strong></td>
-</tr>
 <tr class="even">
 <td>viruscheck</td>
 <td>The last time viruscheck process was run for the file (?)</td>
 <td>unixtime</td>
 <td>1654191352</td>
-<td><strong>Y</strong></td>
+<td>N</td>
 </tr>
 </tbody>
 </table>
@@ -27765,84 +27757,6 @@ $ tree /tmp/b
 <li>"error": return an error based on option value</li>
 </ul>
 <h1 id="changelog">Changelog</h1>
-<h2 id="v1.59.2---2022-09-15">v1.59.2 - 2022-09-15</h2>
-<p><a href="https://github.com/rclone/rclone/compare/v1.59.1...v1.59.2">See commits</a></p>
-<ul>
-<li>Bug Fixes
-<ul>
-<li>config: Move locking to fix fatal error: concurrent map read and map write (Nick Craig-Wood)</li>
-</ul></li>
-<li>Local
-<ul>
-<li>Disable xattr support if the filesystems indicates it is not supported (Nick Craig-Wood)</li>
-</ul></li>
-<li>Azure Blob
-<ul>
-<li>Fix chunksize calculations producing too many parts (Nick Craig-Wood)</li>
-</ul></li>
-<li>B2
-<ul>
-<li>Fix chunksize calculations producing too many parts (Nick Craig-Wood)</li>
-</ul></li>
-<li>S3
-<ul>
-<li>Fix chunksize calculations producing too many parts (Nick Craig-Wood)</li>
-</ul></li>
-</ul>
-<h2 id="v1.59.1---2022-08-08">v1.59.1 - 2022-08-08</h2>
-<p><a href="https://github.com/rclone/rclone/compare/v1.59.0...v1.59.1">See commits</a></p>
-<ul>
-<li>Bug Fixes
-<ul>
-<li>accounting: Fix panic in core/stats-reset with unknown group (Nick Craig-Wood)</li>
-<li>build: Fix android build after GitHub actions change (Nick Craig-Wood)</li>
-<li>dlna: Fix SOAP action header parsing (Joram Schrijver)</li>
-<li>docs: Fix links to mount command from install docs (albertony)</li>
-<li>dropox: Fix ChangeNotify was unable to decrypt errors (Nick Craig-Wood)</li>
-<li>fs: Fix parsing of times and durations of the form "YYYY-MM-DD HH:MM:SS" (Nick Craig-Wood)</li>
-<li>serve sftp: Fix checksum detection (Nick Craig-Wood)</li>
-<li>sync: Add accidentally missed filter-sensitivity to --backup-dir option (Nick Naumann)</li>
-</ul></li>
-<li>Combine
-<ul>
-<li>Fix docs showing <code>remote=</code> instead of <code>upstreams=</code> (Nick Craig-Wood)</li>
-<li>Throw error if duplicate directory name is specified (Nick Craig-Wood)</li>
-<li>Fix errors with backends shutting down while in use (Nick Craig-Wood)</li>
-</ul></li>
-<li>Dropbox
-<ul>
-<li>Fix hang on quit with --dropbox-batch-mode off (Nick Craig-Wood)</li>
-<li>Fix infinite loop on uploading a corrupted file (Nick Craig-Wood)</li>
-</ul></li>
-<li>Internetarchive
-<ul>
-<li>Ignore checksums for files using the different method (Lesmiscore)</li>
-<li>Handle hash symbol in the middle of filename (Lesmiscore)</li>
-</ul></li>
-<li>Jottacloud
-<ul>
-<li>Fix working with whitelabel Elgiganten Cloud</li>
-<li>Do not store username in config when using standard auth (albertony)</li>
-</ul></li>
-<li>Mega
-<ul>
-<li>Fix nil pointer exception when bad node received (Nick Craig-Wood)</li>
-</ul></li>
-<li>S3
-<ul>
-<li>Fix --s3-no-head panic: reflect: Elem of invalid type s3.PutObjectInput (Nick Craig-Wood)</li>
-</ul></li>
-<li>SFTP
-<ul>
-<li>Fix issue with WS_FTP by working around failing RealPath (albertony)</li>
-</ul></li>
-<li>Union
-<ul>
-<li>Fix duplicated files when using directories with leading / (Nick Craig-Wood)</li>
-<li>Fix multiple files being uploaded when roots don't exist (Nick Craig-Wood)</li>
-<li>Fix panic due to misalignment of struct field in 32 bit architectures (r-ricci)</li>
-</ul></li>
-</ul>
 <h2 id="v1.59.0---2022-07-09">v1.59.0 - 2022-07-09</h2>
 <p><a href="https://github.com/rclone/rclone/compare/v1.58.0...v1.59.0">See commits</a></p>
 <ul>
MANUAL.md (generated, 99 changes)
@@ -1,6 +1,6 @@
 % rclone(1) User Manual
 % Nick Craig-Wood
-% Sep 15, 2022
+% Jul 09, 2022

 # Rclone syncs your files to cloud storage

@@ -506,7 +506,7 @@ such as a regular [sync](https://rclone.org/commands/rclone_sync/), you will pro
 to configure your rclone command in your operating system's scheduler. If you need to
 expose *service*-like features, such as [remote control](https://rclone.org/rc/),
 [GUI](https://rclone.org/gui/), [serve](https://rclone.org/commands/rclone_serve/)
-or [mount](https://rclone.org/commands/rclone_mount/), you will often want an rclone
+or [mount](https://rclone.org/commands/rclone_move/), you will often want an rclone
 command always running in the background, and configuring it to run in a service infrastructure
 may be a better option. Below are some alternatives on how to achieve this on
 different operating systems.
@@ -539,7 +539,7 @@ c:\rclone\rclone.exe sync c:\files remote:/files --no-console --log-file c:\rclo

 #### User account

-As mentioned in the [mount](https://rclone.org/commands/rclone_mount/) documentation,
+As mentioned in the [mount](https://rclone.org/commands/rclone_move/) documentation,
 mounted drives created as Administrator are not visible to other accounts, not even the
 account that was elevated as Administrator. By running the mount command as the
 built-in `SYSTEM` user account, it will create drives accessible for everyone on
@@ -897,11 +897,6 @@ extended explanation in the [copy](https://rclone.org/commands/rclone_copy/) com
 If dest:path doesn't exist, it is created and the source:path contents
 go there.

-It is not possible to sync overlapping remotes. However, you may exclude
-the destination from the sync with a filter rule or by putting an
-exclude-if-present file inside the destination directory and sync to a
-destination that is inside the source directory.
-
 **Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics

 **Note**: Use the `rclone dedupe` command to deal with "Duplicate object/directory found in source/destination - ignoring" errors.
@@ -8740,8 +8735,7 @@ been added) in DIR, then it will be overwritten.

 The remote in use must support server-side move or copy and you must
 use the same remote as the destination of the sync. The backup
-directory must not overlap the destination directory without it being
-excluded by a filter rule.
+directory must not overlap the destination directory.

 For example

@@ -14342,7 +14336,7 @@ These flags are available for every command.
 --use-json-log Use json log format
 --use-mmap Use mmap allocator (see docs)
 --use-server-modtime Use server modified time instead of object metadata
---user-agent string Set the user-agent to a specified string (default "rclone/v1.59.2")
+--user-agent string Set the user-agent to a specified string (default "rclone/v1.59.0")
 -v, --verbose count Print lots more stuff (repeat for more)
 ```

@@ -25142,7 +25136,7 @@ This would produce something like this:

     [AllDrives]
     type = combine
-    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
+    remote = "My Drive=My Drive:" "Test Drive=Test Drive:"

 If you then add that config to your config file (find it with `rclone
 config file`) then you can access all the shared drives in one place
@@ -28349,7 +28343,7 @@ drives found and a combined drive.

     [AllDrives]
     type = combine
-    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
+    remote = "My Drive=My Drive:" "Test Drive=Test Drive:"

 Adding this to the rclone config file will cause those team drives to
 be accessible with the aliases shown. Any illegal charactes will be
@@ -30501,9 +30495,10 @@ Refer to [IAS3 API documentation](https://archive.org/services/docs/api/ias3.htm
 Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
 command.) You may put subdirectories in too, e.g. `remote:item/path/to/dir`.

-Once you have made a remote (see the provider specific section above)
-you can use it like this:
+Unlike S3, listing up all items uploaded by you isn't supported.
+
+Once you have made a remote, you can use it like this:

 Make a new item

@@ -30541,7 +30536,6 @@ The following are reserved by Internet Archive:
 - `format`
 - `old_version`
 - `viruscheck`
-- `summation`

 Trying to set values to these keys is ignored with a warning.
 Only setting `mtime` is an exception. Doing so make it the identical behavior as setting ModTime.
@@ -30747,20 +30741,19 @@ Here are the possible system metadata items for the internetarchive backend.

 | Name | Help | Type | Example | Read Only |
 |------|------|------|---------|-----------|
-| crc32 | CRC32 calculated by Internet Archive | string | 01234567 | **Y** |
-| format | Name of format identified by Internet Archive | string | Comma-Separated Values | **Y** |
-| md5 | MD5 hash calculated by Internet Archive | string | 01234567012345670123456701234567 | **Y** |
-| mtime | Time of last modification, managed by Rclone | RFC 3339 | 2006-01-02T15:04:05.999999999Z | **Y** |
-| name | Full file path, without the bucket part | filename | backend/internetarchive/internetarchive.go | **Y** |
-| old_version | Whether the file was replaced and moved by keep-old-version flag | boolean | true | **Y** |
+| crc32 | CRC32 calculated by Internet Archive | string | 01234567 | N |
+| format | Name of format identified by Internet Archive | string | Comma-Separated Values | N |
+| md5 | MD5 hash calculated by Internet Archive | string | 01234567012345670123456701234567 | N |
+| mtime | Time of last modification, managed by Rclone | RFC 3339 | 2006-01-02T15:04:05.999999999Z | N |
+| name | Full file path, without the bucket part | filename | backend/internetarchive/internetarchive.go | N |
+| old_version | Whether the file was replaced and moved by keep-old-version flag | boolean | true | N |
 | rclone-ia-mtime | Time of last modification, managed by Internet Archive | RFC 3339 | 2006-01-02T15:04:05.999999999Z | N |
 | rclone-mtime | Time of last modification, managed by Rclone | RFC 3339 | 2006-01-02T15:04:05.999999999Z | N |
 | rclone-update-track | Random value used by Rclone for tracking changes inside Internet Archive | string | aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | N |
-| sha1 | SHA1 hash calculated by Internet Archive | string | 0123456701234567012345670123456701234567 | **Y** |
-| size | File size in bytes | decimal number | 123456 | **Y** |
-| source | The source of the file | string | original | **Y** |
-| summation | Check https://forum.rclone.org/t/31922 for how it is used | string | md5 | **Y** |
-| viruscheck | The last time viruscheck process was run for the file (?) | unixtime | 1654191352 | **Y** |
+| sha1 | SHA1 hash calculated by Internet Archive | string | 0123456701234567012345670123456701234567 | N |
+| size | File size in bytes | decimal number | 123456 | N |
+| source | The source of the file | string | original | N |
+| viruscheck | The last time viruscheck process was run for the file (?) | unixtime | 1654191352 | N |

 See the [metadata](https://rclone.org/docs/#metadata) docs for more info.

@@ -39420,58 +39413,6 @@ Options:

 # Changelog

-## v1.59.2 - 2022-09-15
-
-[See commits](https://github.com/rclone/rclone/compare/v1.59.1...v1.59.2)
-
-* Bug Fixes
-    * config: Move locking to fix fatal error: concurrent map read and map write (Nick Craig-Wood)
-* Local
-    * Disable xattr support if the filesystems indicates it is not supported (Nick Craig-Wood)
-* Azure Blob
-    * Fix chunksize calculations producing too many parts (Nick Craig-Wood)
-* B2
-    * Fix chunksize calculations producing too many parts (Nick Craig-Wood)
-* S3
-    * Fix chunksize calculations producing too many parts (Nick Craig-Wood)
-
-## v1.59.1 - 2022-08-08
-
-[See commits](https://github.com/rclone/rclone/compare/v1.59.0...v1.59.1)
-
-* Bug Fixes
-    * accounting: Fix panic in core/stats-reset with unknown group (Nick Craig-Wood)
-    * build: Fix android build after GitHub actions change (Nick Craig-Wood)
-    * dlna: Fix SOAP action header parsing (Joram Schrijver)
-    * docs: Fix links to mount command from install docs (albertony)
-    * dropox: Fix ChangeNotify was unable to decrypt errors (Nick Craig-Wood)
-    * fs: Fix parsing of times and durations of the form "YYYY-MM-DD HH:MM:SS" (Nick Craig-Wood)
-    * serve sftp: Fix checksum detection (Nick Craig-Wood)
-    * sync: Add accidentally missed filter-sensitivity to --backup-dir option (Nick Naumann)
-* Combine
-    * Fix docs showing `remote=` instead of `upstreams=` (Nick Craig-Wood)
-    * Throw error if duplicate directory name is specified (Nick Craig-Wood)
-    * Fix errors with backends shutting down while in use (Nick Craig-Wood)
-* Dropbox
-    * Fix hang on quit with --dropbox-batch-mode off (Nick Craig-Wood)
-    * Fix infinite loop on uploading a corrupted file (Nick Craig-Wood)
-* Internetarchive
-    * Ignore checksums for files using the different method (Lesmiscore)
-    * Handle hash symbol in the middle of filename (Lesmiscore)
-* Jottacloud
-    * Fix working with whitelabel Elgiganten Cloud
-    * Do not store username in config when using standard auth (albertony)
-* Mega
-    * Fix nil pointer exception when bad node received (Nick Craig-Wood)
-* S3
-    * Fix --s3-no-head panic: reflect: Elem of invalid type s3.PutObjectInput (Nick Craig-Wood)
-* SFTP
-    * Fix issue with WS_FTP by working around failing RealPath (albertony)
-* Union
-    * Fix duplicated files when using directories with leading / (Nick Craig-Wood)
-    * Fix multiple files being uploaded when roots don't exist (Nick Craig-Wood)
-    * Fix panic due to misalignment of struct field in 32 bit architectures (r-ricci)
-
 ## v1.59.0 - 2022-07-09

 [See commits](https://github.com/rclone/rclone/compare/v1.58.0...v1.59.0)

MANUAL.txt (generated, 177 changes)
@@ -1,6 +1,6 @@
 rclone(1) User Manual
 Nick Craig-Wood
-Sep 15, 2022
+Jul 09, 2022

 Rclone syncs your files to cloud storage

@@ -857,11 +857,6 @@ extended explanation in the copy command if unsure.
 If dest:path doesn't exist, it is created and the source:path contents
 go there.

-It is not possible to sync overlapping remotes. However, you may exclude
-the destination from the sync with a filter rule or by putting an
-exclude-if-present file inside the destination directory and sync to a
-destination that is inside the source directory.
-
 Note: Use the -P/--progress flag to view real-time transfer statistics

 Note: Use the rclone dedupe command to deal with "Duplicate
@@ -8337,8 +8332,7 @@ added) in DIR, then it will be overwritten.

 The remote in use must support server-side move or copy and you must use
 the same remote as the destination of the sync. The backup directory
-must not overlap the destination directory without it being excluded by
-a filter rule.
+must not overlap the destination directory.

 For example

@@ -13893,7 +13887,7 @@ These flags are available for every command.
 --use-json-log Use json log format
 --use-mmap Use mmap allocator (see docs)
 --use-server-modtime Use server modified time instead of object metadata
---user-agent string Set the user-agent to a specified string (default "rclone/v1.59.2")
+--user-agent string Set the user-agent to a specified string (default "rclone/v1.59.0")
 -v, --verbose count Print lots more stuff (repeat for more)

 Backend Flags
@@ -24550,7 +24544,7 @@ This would produce something like this:

     [AllDrives]
     type = combine
-    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
+    remote = "My Drive=My Drive:" "Test Drive=Test Drive:"

 If you then add that config to your config file (find it with
 rclone config file) then you can access all the shared drives in one
@@ -27758,7 +27752,7 @@ found and a combined drive.

     [AllDrives]
     type = combine
-    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
+    remote = "My Drive=My Drive:" "Test Drive=Test Drive:"

 Adding this to the rclone config file will cause those team drives to be
 accessible with the aliases shown. Any illegal charactes will be
@@ -29892,9 +29886,10 @@ Refer to IAS3 API documentation for the API this backend uses.
 Paths are specified as remote:bucket (or remote: for the lsd command.)
 You may put subdirectories in too, e.g. remote:item/path/to/dir.

-Once you have made a remote (see the provider specific section above)
-you can use it like this:
+Unlike S3, listing up all items uploaded by you isn't supported.
+
+Once you have made a remote, you can use it like this:

 Make a new item

@@ -29934,7 +29929,7 @@ file. The metadata will appear as file metadata on Internet Archive.
 However, some fields are reserved by both Internet Archive and rclone.

 The following are reserved by Internet Archive: - name - source - size -
-md5 - crc32 - sha1 - format - old_version - viruscheck - summation
+md5 - crc32 - sha1 - format - old_version - viruscheck

 Trying to set values to these keys is ignored with a warning. Only
 setting mtime is an exception. Doing so make it the identical behavior
@@ -30145,52 +30140,65 @@ including them.
 Here are the possible system metadata items for the internetarchive
 backend.

-  Name                  Help                                                                        Type             Example                                        Read Only
-  crc32                 CRC32 calculated by Internet Archive                                       string           01234567                                       Y
-  format                Name of format identified by Internet Archive                              string           Comma-Separated Values                         Y
-  md5                   MD5 hash calculated by Internet Archive                                    string           01234567012345670123456701234567               Y
-  mtime                 Time of last modification, managed by Rclone                               RFC 3339         2006-01-02T15:04:05.999999999Z                 Y
-  name                  Full file path, without the bucket part                                    filename         backend/internetarchive/internetarchive.go    Y
-  old_version           Whether the file was replaced and moved by keep-old-version flag           boolean          true                                           Y
-  rclone-ia-mtime       Time of last modification, managed by Internet Archive                     RFC 3339         2006-01-02T15:04:05.999999999Z                 N
-  rclone-mtime          Time of last modification, managed by Rclone                               RFC 3339         2006-01-02T15:04:05.999999999Z                 N
-  rclone-update-track   Random value used by Rclone for tracking changes inside Internet Archive   string           aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa               N
-  sha1                  SHA1 hash calculated by Internet Archive                                   string           0123456701234567012345670123456701234567       Y
-  size                  File size in bytes                                                          decimal number   123456                                         Y
-  source                The source of the file                                                      string           original                                       Y
-  summation             Check https://forum.rclone.org/t/31922 for how it is used                   string           md5                                            Y
-  viruscheck            The last time viruscheck process was run for the file (?)                   unixtime         1654191352                                     Y
+  Name                  Help                                                                        Type             Example                                        Read Only
+  crc32                 CRC32 calculated by Internet Archive                                       string           01234567                                       N
+  format                Name of format identified by Internet Archive                              string           Comma-Separated Values                         N
+  md5                   MD5 hash calculated by Internet Archive                                    string           01234567012345670123456701234567               N
+  mtime                 Time of last modification, managed by Rclone                               RFC 3339         2006-01-02T15:04:05.999999999Z                 N
+  name                  Full file path, without the bucket part                                    filename         backend/internetarchive/internetarchive.go    N
+  old_version           Whether the file was replaced and moved by keep-old-version flag           boolean          true                                           N
+  rclone-ia-mtime       Time of last modification, managed by Internet Archive                     RFC 3339         2006-01-02T15:04:05.999999999Z                 N
+  rclone-mtime          Time of last modification, managed by Rclone                               RFC 3339         2006-01-02T15:04:05.999999999Z                 N
+  rclone-update-track   Random value used by Rclone for tracking changes inside Internet Archive   string           aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa               N
+  sha1                  SHA1 hash calculated by Internet Archive                                   string           0123456701234567012345670123456701234567       N
+  size                  File size in bytes                                                          decimal number   123456                                         N
+  source                The source of the file                                                      string           original                                       N
+  viruscheck            The last time viruscheck process was run for the file (?)                   unixtime         1654191352                                     N

 See the metadata docs for more info.

@@ -38931,79 +38939,6 @@ Options:

 Changelog

-v1.59.2 - 2022-09-15
-
-See commits
-
-- Bug Fixes
-  - config: Move locking to fix fatal error: concurrent map read and
-    map write (Nick Craig-Wood)
-- Local
-  - Disable xattr support if the filesystems indicates it is not
-    supported (Nick Craig-Wood)
-- Azure Blob
-  - Fix chunksize calculations producing too many parts (Nick
-    Craig-Wood)
-- B2
-  - Fix chunksize calculations producing too many parts (Nick
-    Craig-Wood)
-- S3
-  - Fix chunksize calculations producing too many parts (Nick
-    Craig-Wood)
-
-v1.59.1 - 2022-08-08
-
-See commits
-
-- Bug Fixes
-  - accounting: Fix panic in core/stats-reset with unknown group
-    (Nick Craig-Wood)
-  - build: Fix android build after GitHub actions change (Nick
-    Craig-Wood)
-  - dlna: Fix SOAP action header parsing (Joram Schrijver)
-  - docs: Fix links to mount command from install docs (albertony)
-  - dropox: Fix ChangeNotify was unable to decrypt errors (Nick
-    Craig-Wood)
-  - fs: Fix parsing of times and durations of the form "YYYY-MM-DD
-    HH:MM:SS" (Nick Craig-Wood)
-  - serve sftp: Fix checksum detection (Nick Craig-Wood)
-  - sync: Add accidentally missed filter-sensitivity to --backup-dir
-    option (Nick Naumann)
-- Combine
-  - Fix docs showing remote= instead of upstreams= (Nick Craig-Wood)
-  - Throw error if duplicate directory name is specified (Nick
-    Craig-Wood)
-  - Fix errors with backends shutting down while in use (Nick
-    Craig-Wood)
-- Dropbox
-  - Fix hang on quit with --dropbox-batch-mode off (Nick Craig-Wood)
-  - Fix infinite loop on uploading a corrupted file (Nick Craig-Wood)
-- Internetarchive
-  - Ignore checksums for files using the different method
-    (Lesmiscore)
-  - Handle hash symbol in the middle of filename (Lesmiscore)
-- Jottacloud
-  - Fix working with whitelabel Elgiganten Cloud
-  - Do not store username in config when using standard auth
-    (albertony)
-- Mega
-  - Fix nil pointer exception when bad node received (Nick
-    Craig-Wood)
-- S3
-  - Fix --s3-no-head panic: reflect: Elem of invalid type
-    s3.PutObjectInput (Nick Craig-Wood)
-- SFTP
-  - Fix issue with WS_FTP by working around failing RealPath
-    (albertony)
-- Union
-  - Fix duplicated files when using directories with leading / (Nick
-    Craig-Wood)
-  - Fix multiple files being uploaded when roots don't exist (Nick
-    Craig-Wood)
-  - Fix panic due to misalignment of struct field in 32 bit
-    architectures (r-ricci)
-
 v1.59.0 - 2022-07-09

 See commits

@@ -1,3 +1,4 @@
+// Package alias implements a virtual provider to rename existing remotes.
 package alias

 import (

@@ -1,3 +1,4 @@
+// Package all imports all the backends
 package all

 import (
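
Both hunks add package doc comments, plausibly to satisfy the goimports/revive checks re-enabled in the linter configuration earlier in this compare. A hedged sketch of the convention they follow (hypothetical package name for illustration): a package comment is a comment block directly above the package clause, beginning with "Package <name>".

```go
// Package mirror copies objects between two remotes. The first
// sentence is what godoc shows in package listings, so it should read
// as a complete summary on its own.
package mirror
```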
@@ -556,9 +556,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 //
 // This is a workaround for Amazon sometimes returning
 //
-// * 408 REQUEST_TIMEOUT
-// * 504 GATEWAY_TIMEOUT
-// * 500 Internal server error
+//   - 408 REQUEST_TIMEOUT
+//   - 504 GATEWAY_TIMEOUT
+//   - 500 Internal server error
 //
 // At the end of large uploads. The speculation is that the timeout
 // is waiting for the sha1 hashing to complete and the file may well
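
The bullet rewrite above follows the Go 1.19 doc comment syntax, where gofmt normalizes list items to the "  - item" form. A minimal sketch of the result, using a hypothetical helper (shouldRetry is not part of this diff):

```go
package sketch

// shouldRetry reports whether an HTTP status is one of the transient
// responses discussed above. Under Go 1.19's doc comment rules, the
// "  - item" lines below render as a bullet list in godoc:
//
//   - 408 REQUEST_TIMEOUT
//   - 504 GATEWAY_TIMEOUT
//   - 500 Internal server error
func shouldRetry(status int) bool {
	switch status {
	case 408, 500, 504:
		return true
	}
	return false
}
```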
@@ -626,7 +626,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,

 // Put the object into the container
 //
-// Copy the reader in to the new object which is returned
+// Copy the reader in to the new object which is returned.
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -685,9 +685,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {

 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given
+// This is stored with the remote path given.
 //
-// It returns the destination Object and a possible error
+// It returns the destination Object and a possible error.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1002,7 +1002,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

 // ModTime returns the modification time of the object
 //
-//
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {

@@ -1,8 +1,7 @@
-// Package azureblob provides an interface to the Microsoft Azure blob object storage system
-
 //go:build !plan9 && !solaris && !js
 // +build !plan9,!solaris,!js

+// Package azureblob provides an interface to the Microsoft Azure blob object storage system
 package azureblob

 import (
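
The azureblob hunk moves the package doc comment below the build constraints. A sketch of the layout it converges on, under the standard tooling rules as I understand them: //go:build lines must precede the package clause with a blank line after them, and a doc comment only attaches to the package if it sits immediately above the package clause.

```go
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

// Package sketch demonstrates the ordering: build constraints first,
// then a blank line, then the doc comment directly above the package
// clause so godoc still associates the two.
package sketch
```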
@@ -1115,7 +1114,7 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {

 // Put the object into the container
 //
-// Copy the reader in to the new object which is returned
+// Copy the reader in to the new object which is returned.
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1247,9 +1246,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given
+// This is stored with the remote path given.
 //
-// It returns the destination Object and a possible error
+// It returns the destination Object and a possible error.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1356,11 +1355,12 @@ func (o *Object) setMetadata(metadata azblob.Metadata) {
 // decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
 //
 // Sets
-// o.id
-// o.modTime
-// o.size
-// o.md5
-// o.meta
+//
+//	o.id
+//	o.modTime
+//	o.size
+//	o.md5
+//	o.meta
 func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
 	metadata := info.NewMetadata()
 	size := info.ContentLength()
@@ -1443,10 +1443,11 @@ func (o *Object) clearMetaData() {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // Sets
-// o.id
-// o.modTime
-// o.size
-// o.md5
+//
+//	o.id
+//	o.modTime
+//	o.size
+//	o.md5
 func (o *Object) readMetaData() (err error) {
 	container, _ := o.split()
 	if !o.fs.containerOK(container) {
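
The "Sets" lists above gain a blank // line and an indent because Go 1.19 treats indented doc comment lines as preformatted blocks. A hedged sketch of the rendered form (the exact indentation in the diff is an assumption; gofmt uses a tab):

```go
package sketch

// readMetaData gets the metadata if it hasn't already been fetched.
//
// Sets
//
//	o.id
//	o.modTime
//
// The indented lines render verbatim in godoc; without the blank "//"
// separator, Go 1.19's gofmt would reflow them into one paragraph.
func readMetaData() {}
```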
@@ -1,3 +1,4 @@
+// Package api provides types used by the Backblaze B2 API.
 package api

 import (

@@ -238,7 +239,7 @@ type GetFileInfoRequest struct {
 // If the original source of the file being uploaded has a last
 // modified time concept, Backblaze recommends using
 // src_last_modified_millis as the name, and a string holding the base
-// 10 number number of milliseconds since midnight, January 1, 1970
+// 10 number of milliseconds since midnight, January 1, 1970
 // UTC. This fits in a 64 bit integer such as the type "long" in the
 // programming language Java. It is intended to be compatible with
 // Java's time long. For example, it can be passed directly into the
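
For the src_last_modified_millis value the corrected comment describes, here is a minimal hedged sketch of producing it (lastModifiedMillis is illustrative, not rclone's code):

```go
package sketch

import (
	"strconv"
	"time"
)

// lastModifiedMillis renders a modification time the way the B2 API
// comment describes: a base-10 string of milliseconds since midnight,
// January 1, 1970 UTC, which fits in a 64 bit integer like Java's long.
func lastModifiedMillis(t time.Time) string {
	return strconv.FormatInt(t.UnixMilli(), 10)
}
```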
@@ -1,4 +1,4 @@
-// Package b2 provides an interface to the Backblaze B2 object storage system
+// Package b2 provides an interface to the Backblaze B2 object storage system.
 package b2

 // FIXME should we remove sha1 checks from here as rclone now supports
@@ -656,15 +656,15 @@ var errEndList = errors.New("end list")
 //
 // (bucket, directory) is the starting directory
 //
-// If prefix is set then it is removed from all file names
+// If prefix is set then it is removed from all file names.
 //
 // If addBucket is set then it adds the bucket to the start of the
-// remotes generated
+// remotes generated.
 //
-// If recurse is set the function will recursively list
+// If recurse is set the function will recursively list.
 //
 // If limit is > 0 then it limits to that many files (must be less
-// than 1000)
+// than 1000).
 //
 // If hidden is set then it will list the hidden (deleted) files too.
 //
@@ -1025,7 +1025,7 @@ func (f *Fs) clearBucketID(bucket string) {

 // Put the object into the bucket
 //
-// Copy the reader in to the new object which is returned
+// Copy the reader in to the new object which is returned.
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1334,9 +1334,9 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *

 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given
+// This is stored with the remote path given.
 //
-// It returns the destination Object and a possible error
+// It returns the destination Object and a possible error.
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1478,7 +1478,7 @@ func (o *Object) Size() int64 {

 // Clean the SHA1
 //
-// Make sure it is lower case
+// Make sure it is lower case.
 //
 // Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
 // Some tools (e.g. Cyberduck) use this
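
A hedged sketch of the SHA1 cleanup that comment describes, assuming B2's documented literal prefix "unverified:" (this mirrors the documented behaviour, not necessarily rclone's exact implementation):

```go
package sketch

import "strings"

// cleanSHA1 lower-cases the hash and strips the "unverified:" prefix
// that some tools (e.g. Cyberduck) store on large file uploads.
func cleanSHA1(sha1 string) string {
	return strings.TrimPrefix(strings.ToLower(sha1), "unverified:")
}
```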
@@ -1490,10 +1490,11 @@ func cleanSHA1(sha1 string) string {
|
||||
// decodeMetaDataRaw sets the metadata from the data passed in
|
||||
//
|
||||
// Sets
|
||||
// o.id
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.sha1
|
||||
//
|
||||
// o.id
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.sha1
|
||||
func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) {
|
||||
o.id = ID
|
||||
o.sha1 = SHA1
|
||||
@@ -1512,10 +1513,11 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
|
||||
// decodeMetaData sets the metadata in the object from an api.File
|
||||
//
|
||||
// Sets
|
||||
// o.id
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.sha1
|
||||
//
|
||||
// o.id
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.sha1
|
||||
func (o *Object) decodeMetaData(info *api.File) (err error) {
|
||||
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
|
||||
}
|
||||
@@ -1523,10 +1525,11 @@ func (o *Object) decodeMetaData(info *api.File) (err error) {
|
||||
// decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
|
||||
//
|
||||
// Sets
|
||||
// o.id
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.sha1
|
||||
//
|
||||
// o.id
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.sha1
|
||||
func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
|
||||
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
|
||||
}
|
||||
@@ -1584,10 +1587,11 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
//
|
||||
// Sets
|
||||
// o.id
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.sha1
|
||||
//
|
||||
// o.id
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.sha1
|
||||
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
if o.id != "" {
|
||||
return nil
|
||||
|
||||
@@ -14,7 +14,7 @@ const (
timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents represents date and time information for the
// Time represents date and time information for the
// box API, by using RFC3339
type Time time.Time


@@ -266,7 +266,7 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
@@ -692,7 +692,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -752,7 +752,7 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -792,9 +792,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

// PutUnchecked the object into the container
//
// This will produce an error if the object already exists
// This will produce an error if the object already exists.
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -877,9 +877,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -995,9 +995,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1235,7 +1235,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1346,9 +1345,9 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
// The new object may have been created if an error is returned.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.tokenRenewer != nil {
o.fs.tokenRenewer.Start()

backend/cache/cache.go
@@ -1,6 +1,7 @@
//go:build !plan9 && !js
// +build !plan9,!js

// Package cache implements a virtual provider to cache existing remotes.
package cache

import (

@@ -32,7 +32,6 @@ import (
"github.com/rclone/rclone/fs/operations"
)

//
// Chunker's composite files have one or more chunks
// and optional metadata object. If it's present,
// meta object is named after the original file.
@@ -65,7 +64,7 @@ import (
// length of 13 decimals it makes a 7-digit base-36 number.
//
// When transactions is set to the norename style, data chunks will
// keep their temporary chunk names (with the transacion identifier
// keep their temporary chunk names (with the transaction identifier
// suffix). To distinguish them from temporary chunks, the txn field
// of the metadata file is set to match the transaction identifier of
// the data chunks.
@@ -79,7 +78,6 @@ import (
// Metadata format v1 does not define any control chunk types,
// they are currently ignored aka reserved.
// In future they can be used to implement resumable uploads etc.
//
const (
ctrlTypeRegStr = `[a-z][a-z0-9]{2,6}`
tempSuffixFormat = `_%04s`
@@ -542,7 +540,6 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
//
// xactID is a transaction identifier. Empty xactID denotes active chunk,
// otherwise temporary chunk name is produced.
//
func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
dir, parentName := path.Split(filePath)
var name, tempSuffix string
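
A quick aside on the naming pieces visible in this hunk: a minimal, self-contained Go sketch, assuming the transaction identifier is a millisecond timestamp re-encoded in base 36 and then padded via the `_%04s` suffix format (the real makeChunkName also applies the configured chunk name format, which is omitted here):

package main

import (
	"fmt"
	"strconv"
	"time"
)

const tempSuffixFormat = `_%04s` // from the const block in this diff

func main() {
	// Base-36 rendering of a 13-decimal-digit millisecond timestamp
	// (assumed source of the transaction identifier).
	xactID := strconv.FormatInt(time.Now().UnixMilli(), 36)
	// Temporary suffix appended to data chunk names, zero-padded to
	// a minimum width of 4.
	fmt.Println(fmt.Sprintf(tempSuffixFormat, xactID))
}
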
@@ -708,7 +705,6 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
// directory together with dead chunks.
// In future a flag named like `--chunker-list-hidden` may be added to
// rclone that will tell List to reveal hidden chunks.
//
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, err = f.base.List(ctx, dir)
if err != nil {
@@ -868,7 +864,6 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
// Note that chunker prefers analyzing file names rather than reading
// the content of meta object assuming that directory scans are fast
// but opening even a small file can be slow on some backends.
//
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.scanObject(ctx, remote, false)
}
@@ -1084,7 +1079,7 @@ func (o *Object) readMetadata(ctx context.Context) error {

// readXactID returns the transaction ID stored in the passed metadata object
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// if xactID has already been read and cahced return it now
// if xactID has already been read and cached return it now
if o.xIDCached {
return o.xactID, nil
}
@@ -1586,7 +1581,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// This command will chain to `purge` from wrapped remote.
// As a result it removes not only composite chunker files with their
// active chunks but also all hidden temporary chunks in the directory.
//
func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.base.Features().Purge
if do == nil {
@@ -1628,7 +1622,6 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Unsupported control chunks will get re-picked by a more recent
// rclone version with unexpected results. This can be helped by
// the `delete hidden` flag above or at least the user has been warned.
//
func (o *Object) Remove(ctx context.Context) (err error) {
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
// operations.Move can still call Remove if chunker's Move refuses
@@ -1804,9 +1797,9 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1825,9 +1818,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -2125,7 +2118,6 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
// file, then tries to read it from metadata. This in theory
// handles the unusual case when a small file has been tampered
// on the level of wrapped remote but chunker is unaware of that.
//
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
if err := o.readMetadata(ctx); err != nil {
return "", err // valid metadata is required to get hash, abort
@@ -2414,7 +2406,6 @@ type metaSimpleJSON struct {
// - for files larger than chunk size
// - if file contents can be mistaken as meta object
// - if consistent hashing is On but wrapped remote can't provide given hash
//
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
version := metadataVersion
if xactID == "" && version == 2 {
@@ -2447,7 +2438,6 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
// New format will have a higher version number and cannot be correctly
// handled by current implementation.
// The version check below will then explicitly ask user to upgrade rclone.
//
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
// Be strict about JSON format
// to reduce possibility that a random small file resembles metadata.

@@ -1,4 +1,4 @@
// Package combine implents a backend to combine multipe remotes in a directory tree
// Package combine implents a backend to combine multiple remotes in a directory tree
package combine

/*
@@ -457,9 +457,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -491,9 +491,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//

@@ -90,7 +90,7 @@ Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.

Level -2 uses Huffmann encoding only. Only use if you know what you
Level -2 uses Huffman encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
Default: sgzip.DefaultCompression,
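
For orientation, these level numbers line up with the constants in the standard library's compress/gzip package; a minimal sketch, using compress/gzip on the assumption that the sgzip package wrapped here treats levels the same way:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

func main() {
	// -1 default, 0 off, 1 fastest, 9 smallest, -2 Huffman-only.
	levels := []int{gzip.DefaultCompression, gzip.NoCompression, gzip.BestSpeed, gzip.BestCompression, gzip.HuffmanOnly}
	for _, level := range levels {
		var buf bytes.Buffer
		w, err := gzip.NewWriterLevel(&buf, level)
		if err != nil {
			panic(err)
		}
		_, _ = w.Write([]byte("example"))
		_ = w.Close()
		fmt.Printf("level %2d -> %d bytes\n", level, buf.Len())
	}
}
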
@@ -130,7 +130,7 @@ type Fs struct {
features *fs.Features // optional features
}

// NewFs contstructs an Fs from the path, container:path
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -451,7 +451,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
}

// Need to include what we allready read
// Need to include what we already read
in = &ReadCloserWrapper{
Reader: io.MultiReader(bytes.NewReader(buf), in),
Closer: in,
@@ -731,7 +731,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}

// If our new object is compressed we have to rename it with the correct size.
// Uncompressed objects don't store the size in the name so we they'll allready have the correct name.
// Uncompressed objects don't store the size in the name so we they'll already have the correct name.
if compressible {
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
if err != nil {
@@ -742,7 +742,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return newObj, nil
}

// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// will break stuff. Right no I can't think of a way to make this work.

// PutUnchecked uploads the object
@@ -785,9 +785,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -835,9 +835,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//

@@ -127,11 +127,11 @@ type fileNameEncoding interface {
// RFC4648
//
// The standard encoding is modified in two ways
// * it becomes lower case (no-one likes upper case filenames!)
// * we strip the padding character `=`
// - it becomes lower case (no-one likes upper case filenames!)
// - we strip the padding character `=`
type caseInsensitiveBase32Encoding struct{}

// EncodeToString encodes a strign using the modified version of
// EncodeToString encodes a string using the modified version of
// base32 encoding.
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
encoded := base32.HexEncoding.EncodeToString(src)
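
Read on its own, the encoding described above is easy to reproduce; a self-contained sketch, assuming it is exactly base32hex lower-cased with its `=` padding stripped (the matching decoder is omitted):

package main

import (
	"encoding/base32"
	"fmt"
	"strings"
)

// encodeName mirrors the modified base32 described in the comment above.
func encodeName(src []byte) string {
	encoded := base32.HexEncoding.EncodeToString(src)
	return strings.ToLower(strings.TrimRight(encoded, "="))
}

func main() {
	fmt.Println(encodeName([]byte("hello"))) // d1imor3f
}
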
@@ -244,7 +244,7 @@ func (c *Cipher) putBlock(buf []byte) {

// encryptSegment encrypts a path segment
//
// This uses EME with AES
// This uses EME with AES.
//
// EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
@@ -254,8 +254,8 @@ func (c *Cipher) putBlock(buf []byte) {
// same filename must encrypt to the same thing.
//
// This means that
// * filenames with the same name will encrypt the same
// * filenames which start the same won't have a common prefix
// - filenames with the same name will encrypt the same
// - filenames which start the same won't have a common prefix
func (c *Cipher) encryptSegment(plaintext string) string {
if plaintext == "" {
return ""
@@ -1085,7 +1085,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {

// DecryptDataSeek decrypts the data stream from offset
//
// The open function must return a ReadCloser opened to the offset supplied
// The open function must return a ReadCloser opened to the offset supplied.
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {

@@ -125,7 +125,7 @@ names, or for debugging purposes.`,

This option could help with shortening the encrypted filename. The
suitable option would depend on the way your remote count the filename
length and if it's case sensitve.`,
length and if it's case sensitive.`,
Default: "base32",
Examples: []fs.OptionExample{
{
@@ -507,9 +507,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -532,9 +532,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//

@@ -758,6 +758,9 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
fs.Errorf(f, "Received download limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
fs.Errorf(f, "Received upload limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
fs.Errorf(f, "Received Shared Drive file limit error: %v", err)
return false, fserrors.FatalError(err)
@@ -1207,6 +1210,7 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
WriteMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
FilterAware: true,
}).Fill(ctx, f)

// Create a new authorized Drive client.
@@ -2177,7 +2181,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -2411,9 +2415,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -2646,9 +2650,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -3302,7 +3306,7 @@ drives found and a combined drive.
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"

Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal charactes will be
be accessible with the aliases shown. Any illegal characters will be
substituted with "_" and duplicate names will have numbers suffixed.
It will also add a remote called AllDrives which shows all the shared
drives combined into one directory tree.
@@ -3566,7 +3570,6 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *baseObject) ModTime(ctx context.Context) time.Time {
@@ -3823,7 +3826,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM

// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {

@@ -19,6 +19,7 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
@@ -28,6 +29,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)

func TestDriveScopes(t *testing.T) {
@@ -190,6 +192,60 @@ func TestExtensionsForImportFormats(t *testing.T) {
}
}

func (f *Fs) InternalTestShouldRetry(t *testing.T) {
ctx := context.Background()
gatewayTimeout := googleapi.Error{
Code: 503,
}
timeoutRetry, timeoutError := f.shouldRetry(ctx, &gatewayTimeout)
assert.True(t, timeoutRetry)
assert.Equal(t, &gatewayTimeout, timeoutError)
generic403 := googleapi.Error{
Code: 403,
}
rLEItem := googleapi.ErrorItem{
Reason: "rateLimitExceeded",
Message: "User rate limit exceeded.",
}
generic403.Errors = append(generic403.Errors, rLEItem)
oldStopUpload := f.opt.StopOnUploadLimit
oldStopDownload := f.opt.StopOnDownloadLimit
f.opt.StopOnUploadLimit = true
f.opt.StopOnDownloadLimit = true
defer func() {
f.opt.StopOnUploadLimit = oldStopUpload
f.opt.StopOnDownloadLimit = oldStopDownload
}()
expectedRLError := fserrors.FatalError(&generic403)
rateLimitRetry, rateLimitErr := f.shouldRetry(ctx, &generic403)
assert.False(t, rateLimitRetry)
assert.Equal(t, rateLimitErr, expectedRLError)
dQEItem := googleapi.ErrorItem{
Reason: "downloadQuotaExceeded",
}
generic403.Errors[0] = dQEItem
expectedDQError := fserrors.FatalError(&generic403)
downloadQuotaRetry, downloadQuotaError := f.shouldRetry(ctx, &generic403)
assert.False(t, downloadQuotaRetry)
assert.Equal(t, downloadQuotaError, expectedDQError)
tDFLEItem := googleapi.ErrorItem{
Reason: "teamDriveFileLimitExceeded",
}
generic403.Errors[0] = tDFLEItem
expectedTDFLError := fserrors.FatalError(&generic403)
teamDriveFileLimitRetry, teamDriveFileLimitError := f.shouldRetry(ctx, &generic403)
assert.False(t, teamDriveFileLimitRetry)
assert.Equal(t, teamDriveFileLimitError, expectedTDFLError)
qEItem := googleapi.ErrorItem{
Reason: "quotaExceeded",
}
generic403.Errors[0] = qEItem
expectedQuotaError := fserrors.FatalError(&generic403)
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError)
}

func (f *Fs) InternalTestDocumentImport(t *testing.T) {
oldAllow := f.opt.AllowImportNameChange
f.opt.AllowImportNameChange = true
@@ -462,6 +518,9 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {

// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)

opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)
@@ -545,6 +604,7 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry)
}

var _ fstests.InternalTester = (*Fs)(nil)

@@ -309,7 +309,7 @@ func (b *batcher) Shutdown() {
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Commiting uploads - please wait...")
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message

@@ -268,7 +268,7 @@ default based on the batch_mode in use.
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish comitting`,
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
@@ -925,7 +925,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1044,9 +1044,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1105,9 +1105,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1669,7 +1669,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
delta := int64(correctOffset) - int64(cursor.Offset)
skip += delta
what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
if skip < 0 {
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
} else if skip == chunkSize {
@@ -1763,7 +1763,7 @@ func checkPathLength(name string) (err error) {

// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {

@@ -1,3 +1,4 @@
// Package fichier provides an interface to the 1Fichier storage system.
package fichier

import (

@@ -84,7 +84,7 @@ type CopyFileResponse struct {
URLs []FileCopy `json:"urls"`
}

// FileCopy is used in the the CopyFileResponse
// FileCopy is used in the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`

@@ -19,7 +19,7 @@ const (
timeFormatJSON = `"` + timeFormatParameters + `"`
)

// Time represents represents date and time information for the
// Time represents date and time information for the
// filefabric API
type Time time.Time

@@ -95,7 +95,7 @@ type Status struct {
// Warning string `json:"warning"` // obsolete
}

// Status statisfies the error interface
// Status satisfies the error interface
func (e *Status) Error() string {
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}

@@ -150,7 +150,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
m configmap.Mapper // to save config
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenMu sync.Mutex // hold when reading the token
@@ -373,7 +373,7 @@ type params map[string]interface{}

// rpc calls the rpc.php method of the SME file fabric
//
// This is an entry point to all the method calls
// This is an entry point to all the method calls.
//
// If result is nil then resp.Body will need closing
func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) {
@@ -678,7 +678,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -697,7 +697,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -783,9 +783,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -843,7 +843,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}

// Wait for the the background task to complete if necessary
// Wait for the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
if taskID == "" || taskID == "0" {
// No task to wait for
@@ -956,9 +956,9 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1135,7 +1135,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1201,7 +1200,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -81,8 +81,22 @@ security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Name: "concurrency",
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.

Note that setting this is very likely to cause deadlocks so it should
be used with care.

If you are doing a sync or copy then make sure concurrency is one more
than the sum of |--transfers| and |--checkers|.

If you use |--check-first| then it just needs to be one more than the
maximum of |--checkers| and |--transfers|.

So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.

`, "|", "`", -1),
Default: 0,
Advanced: true,
}, {
@@ -769,11 +783,12 @@ func (f *Fs) Hashes() hash.Set {

// Precision shows whether modified time is supported or not depending on the
// FTP server capabilities, namely whether FTP server:
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
// - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM)
//
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) Precision() time.Duration {
if (f.fGetTime || f.fLstTime) && f.fSetTime {
@@ -1149,7 +1164,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read

// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -311,7 +311,7 @@ rclone does if you know the bucket exists already.
Help: `If set this will decompress gzip encoded objects.

It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will download these files files as compressed objects.
set. Normally rclone will download these files as compressed objects.

If this flag is set then rclone will decompress these files with
"Content-Encoding: gzip" as they are received. This means that rclone
@@ -319,6 +319,10 @@ can't check the size and hash but the file contents will be decompressed.
`,
Advanced: true,
Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -343,6 +347,7 @@ type Options struct {
StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"`
Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -523,7 +528,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

// Create a new authorized Drive client.
f.client = oAuthClient
f.svc, err = storage.NewService(context.Background(), option.WithHTTPClient(f.client))
gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)}
if opt.Endpoint != "" {
gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint))
}
f.svc, err = storage.NewService(context.Background(), gcsOpts...)
if err != nil {
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
}
@@ -580,7 +589,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
// Set recurse to read sub directories.
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
@@ -798,7 +807,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -900,9 +909,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//

@@ -1,3 +1,4 @@
// Package api provides types used by the Google Photos API.
package api

import (

@@ -178,7 +178,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
unAuth *rest.Client // unauthenticated http client
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
ts *oauthutil.TokenSource // token source for oauth2
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
@@ -661,7 +661,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {

@@ -315,7 +315,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S

// featureFilter creates a filter for the Feature enum
//
// The API only supports one feature, FAVORITES, so hardcode that feature
// The API only supports one feature, FAVORITES, so hardcode that feature.
//
// https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {

@@ -265,9 +265,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//

@@ -1,6 +1,7 @@
//go:build !plan9
// +build !plan9

// Package hdfs provides an interface to the HDFS storage system.
package hdfs

import (

@@ -330,7 +330,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, transaction)
}

// Do not allow the root-prefix to be non-existent nor a directory,
// Do not allow the root-prefix to be nonexistent nor a directory,
// but it can be empty.
if f.opt.RootPrefix != "" {
item, err := f.fetchMetadataForPath(ctx, f.opt.RootPrefix, api.HiDriveObjectNoMetadataFields)
@@ -623,7 +623,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// should be retried after the parent-directories of the destination have been created.
// If so, it will create the parent-directories.
//
// If any errors arrise while finding the source or
// If any errors arise while finding the source or
// creating the parent-directory those will be returned.
// Otherwise returns the originalError.
func (f *Fs) shouldRetryAndCreateParents(ctx context.Context, destinationPath string, sourcePath string, originalError error) (bool, error) {
@@ -961,7 +961,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} else {
_, _, err = o.fs.uploadFileChunked(ctx, resolvedPath, in, modTime, int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency)
}
// Try to check if object was updated, eitherway.
// Try to check if object was updated, either way.
// Metadata should be updated even if the upload fails.
info, metaErr = o.fs.fetchMetadataForPath(ctx, resolvedPath, api.HiDriveObjectWithMetadataFields)
} else {

@@ -138,7 +138,7 @@ var testTable = []struct {
// pattern describes how to use data to construct the hash-input.
// For every entry n at even indices this repeats the data n times.
// For every entry m at odd indices this repeats a null-byte m times.
// The input-data is constructed by concatinating the results in order.
// The input-data is constructed by concatenating the results in order.
pattern []int64
out []byte
name string
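
The even/odd pattern rule in that comment is compact; a self-contained sketch of how such an input could be expanded (the helper is illustrative, not the test's actual code):

package main

import (
	"bytes"
	"fmt"
)

// buildInput expands a pattern: entries at even indices repeat data n times,
// entries at odd indices insert m zero-bytes, concatenated in order.
func buildInput(data []byte, pattern []int64) []byte {
	var out []byte
	for i, n := range pattern {
		if i%2 == 0 {
			out = append(out, bytes.Repeat(data, int(n))...)
		} else {
			out = append(out, make([]byte, int(n))...)
		}
	}
	return out
}

func main() {
	fmt.Printf("%q\n", buildInput([]byte("ab"), []int64{2, 3, 1}))
	// "abab\x00\x00\x00ab"
}
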

@@ -1,3 +1,4 @@
// Package internal provides utilities for HiDrive.
package internal

import (

@@ -227,7 +227,7 @@ type Object struct {
rawData json.RawMessage
}

// IAFile reprensents a subset of object in MetadataResponse.Files
// IAFile represents a subset of object in MetadataResponse.Files
type IAFile struct {
Name string `json:"name"`
// Source string `json:"source"`
@@ -243,7 +243,7 @@ type IAFile struct {
rawData json.RawMessage
}

// MetadataResponse reprensents subset of the JSON object returned by (frontend)/metadata/
// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/
type MetadataResponse struct {
Files []IAFile `json:"files"`
ItemSize int64 `json:"item_size"`
@@ -577,9 +577,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1273,7 +1273,7 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
}

// mimicks urllib.parse.quote() on Python; exclude / from url.PathEscape
// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}

@@ -1,3 +1,4 @@
// Package api provides types used by the Jottacloud API.
package api

import (

@@ -1,3 +1,4 @@
// Package jottacloud provides an interface to the Jottacloud storage system.
package jottacloud

import (
@@ -1247,7 +1248,7 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1397,9 +1398,9 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1417,7 +1418,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)

// if destination was a trashed file then after a successfull copy the copied file is still in trash (bug in api?)
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
@@ -1433,9 +1434,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1829,7 +1830,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -1,3 +1,4 @@
// Package koofr provides an interface to the Koofr storage system.
package koofr

import (
@@ -667,7 +668,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
//
// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
//
// I am not sure about meaning of "path" parameter; in my expriments
// I am not sure about meaning of "path" parameter; in my experiments
// it is always "%2F", and omitting it or putting any other value
// results in 404.
//

@@ -22,6 +22,7 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
@@ -299,6 +300,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -443,6 +445,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
filter, useFilter := filter.GetConfig(ctx), filter.GetUseFilter(ctx)

fsDirPath := f.localPath(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
@@ -493,6 +497,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
if fierr != nil {
// Don't report errors on any file names that are excluded
if useFilter {
newRemote := f.cleanRemote(dir, name)
if !filter.IncludeRemote(newRemote) {
continue
}
}
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
@@ -510,6 +521,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name := fi.Name()
mode := fi.Mode()
newRemote := f.cleanRemote(dir, name)
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
@@ -699,9 +715,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//

@@ -9,11 +9,13 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
@@ -190,7 +192,7 @@ func TestHashOnUpdate(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
|
||||
|
||||
// Reupload it with diferent contents but same size and timestamp
|
||||
// Reupload it with different contents but same size and timestamp
|
||||
var b = bytes.NewBufferString("CONTENT")
|
||||
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
|
||||
err = o.Update(ctx, b, src)
|
||||
@@ -366,3 +368,36 @@ func TestMetadata(t *testing.T) {
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestFilter(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
when := time.Now()
|
||||
r.WriteFile("included", "included file", when)
|
||||
r.WriteFile("excluded", "excluded file", when)
|
||||
f := r.Flocal.(*Fs)
|
||||
|
||||
// Check set up for filtering
|
||||
assert.True(t, f.Features().FilterAware)
|
||||
|
||||
// Add a filter
|
||||
ctx, fi := filter.AddConfig(ctx)
|
||||
require.NoError(t, fi.AddRule("+ included"))
|
||||
require.NoError(t, fi.AddRule("- *"))
|
||||
|
||||
// Check listing without use filter flag
|
||||
entries, err := f.List(ctx, "")
|
||||
require.NoError(t, err)
|
||||
sort.Sort(entries)
|
||||
require.Equal(t, "[excluded included]", fmt.Sprint(entries))
|
||||
|
||||
// Add user filter flag
|
||||
ctx = filter.SetUseFilter(ctx, true)
|
||||
|
||||
// Check listing with use filter flag
|
||||
entries, err = f.List(ctx, "")
|
||||
require.NoError(t, err)
|
||||
sort.Sort(entries)
|
||||
require.Equal(t, "[included]", fmt.Sprint(entries))
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (

const haveSetBTime = false

// setBTime changes the the birth time of the file passed in
// setBTime changes the birth time of the file passed in
func setBTime(name string, btime time.Time) error {
// Does nothing
return nil

@@ -5,13 +5,13 @@ package local

import (
"os"
"time"
"syscall"
"time"
)

const haveSetBTime = true

// setBTime sets the the birth time of the file passed in
// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
h, err := syscall.Open(name, os.O_RDWR, 0755)
if err != nil {

@@ -1,3 +1,4 @@
// Package api provides types used by the Mail.ru API.
package api

import (

@@ -1,3 +1,4 @@
// Package mailru provides an interface to the Mail.ru Cloud storage system.
package mailru

import (
@@ -630,9 +631,10 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt

// itemToEntry converts API item to rclone directory entry
// The dirSize return value is:
// <0 - for a file or in case of error
// =0 - for an empty directory
// >0 - for a non-empty directory
//
// <0 - for a file or in case of error
// =0 - for an empty directory
// >0 - for a non-empty directory
func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.DirEntry, dirSize int, err error) {
remote, err := f.relPath(f.opt.Enc.ToStandardPath(item.Home))
if err != nil {

@@ -118,7 +118,7 @@ type Fs struct {

// Object describes a mega object
//
// Will definitely have info but maybe not meta
// Will definitely have info but maybe not meta.
//
// Normally rclone would just store an ID here but go-mega and mega.nz
// expect you to build an entire tree of all the objects in memory.
@@ -347,7 +347,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
}
}
if err != nil {
return nil, fmt.Errorf("internal error: mkdir called with non-existent root node: %w", err)
return nil, fmt.Errorf("internal error: mkdir called with nonexistent root node: %w", err)
}
// i is number of directories to create (may be 0)
// node is directory to create them from
@@ -387,7 +387,7 @@ func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
return f._rootNode, nil
}

// Check for pre-existing root
// Check for preexisting root
absRoot := f.srv.FS.GetRoot()
node, err := f.findDir(absRoot, f.root)
//log.Printf("findRoot findDir %p %v", node, err)
@@ -536,7 +536,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the dirNode, object, leaf and error
// Returns the dirNode, object, leaf and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
@@ -554,7 +554,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
// PutUnchecked uploads the object
@@ -576,7 +576,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

// PutUnchecked the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
// PutUnchecked uploads the object
@@ -749,9 +749,9 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -979,7 +979,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1115,7 +1114,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -418,7 +418,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -463,9 +463,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//

@@ -118,7 +118,7 @@ type Fs struct {
filetype string // dir, file or symlink
dirscreated map[string]bool // if implicit dir has been created already
dirscreatedMutex sync.Mutex // mutex to protect dirscreated
statcache map[string][]File // cache successfull stat requests
statcache map[string][]File // cache successful stat requests
statcacheMutex sync.RWMutex // RWMutex to protect statcache
}

@@ -424,7 +424,7 @@ func (f *Fs) getFileName(file *File) string {
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.filetype == "" {
// This happens in two scenarios.
// 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
err := f.initFs(ctx, dir)
if err != nil {
@@ -488,7 +488,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.filetype == "" {
// This happens in two scenarios.
// 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
err := f.initFs(ctx, dir)
if err != nil {

@@ -1,5 +1,4 @@
// Types passed and returned to and from the API

// Package api provides types used by the OneDrive API.
package api

import (
@@ -14,7 +13,7 @@ const (
PackageTypeOneNote = "oneNote"
)

// Error is returned from one drive when things go wrong
// Error is returned from OneDrive when things go wrong
type Error struct {
ErrorInfo struct {
Code string `json:"code"`
@@ -71,7 +70,7 @@ type Drive struct {
Quota Quota `json:"quota"`
}

// Timestamp represents represents date and time information for the
// Timestamp represents date and time information for the
// OneDrive API, by using ISO 8601 and is always in UTC time.
type Timestamp time.Time

@@ -250,8 +249,8 @@ type MoveItemRequest struct {
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write.
}

//CreateShareLinkRequest is the request to create a sharing link
//Always Type:view and Scope:anonymous for public sharing
// CreateShareLinkRequest is the request to create a sharing link
// Always Type:view and Scope:anonymous for public sharing
type CreateShareLinkRequest struct {
Type string `json:"type"` // Link type in View, Edit or Embed
Scope string `json:"scope,omitempty"` // Scope in anonymous, organization
@@ -259,7 +258,7 @@ type CreateShareLinkRequest struct {
Expiry *time.Time `json:"expirationDateTime,omitempty"` // A String with format of yyyy-MM-ddTHH:mm:ssZ of DateTime indicates the expiration time of the permission.
}

//CreateShareLinkResponse is the response from CreateShareLinkRequest
// CreateShareLinkResponse is the response from CreateShareLinkRequest
type CreateShareLinkResponse struct {
ID string `json:"id"`
Roles []string `json:"roles"`

@@ -118,7 +118,7 @@ func init() {
Help: "Microsoft Cloud Germany",
}, {
Value: regionCN,
Help: "Azure and Office 365 operated by 21Vianet in China",
Help: "Azure and Office 365 operated by Vnet Group in China",
},
},
}, {
@@ -600,14 +600,14 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote one drive
// Fs represents a remote OneDrive
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the OneDrive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
@@ -615,7 +615,7 @@ type Fs struct {
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
}

// Object describes a one drive object
// Object describes a OneDrive object
//
// Will definitely have info but maybe not meta
type Object struct {
@@ -645,7 +645,7 @@ func (f *Fs) Root() string {

// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("One drive root '%s'", f.root)
return fmt.Sprintf("OneDrive root '%s'", f.root)
}

// Features returns the optional features of this Fs
@@ -653,7 +653,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}

// parsePath parses a one drive 'url'
// parsePath parses a OneDrive 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
@@ -727,7 +727,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
//
// If `relPath` == '', do not append the slash (See #3664)
// If `relPath` == ”, do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts, _ := f.newOptsCallWithIDPath(normalizedID, relPath, true, "GET", "")

@@ -1137,7 +1137,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -1156,7 +1156,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object into the container
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1280,9 +1280,9 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1387,9 +1387,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1843,7 +1843,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -2184,7 +2183,7 @@ func (o *Object) ID() string {
* 3. To avoid region-related issues, please don't manually build rest.Opts from scratch.
* Instead, use these helper function, and customize the URL afterwards if needed.
*
* currently, the 21ViaNet's API differs in the following places:
* currently, the Vnet Group's API differs in the following places:
* - https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route}
* - this API doesn't work (gives invalid request)
* - can be replaced with the following API:
@@ -2233,7 +2232,7 @@ func escapeSingleQuote(str string) string {
// newOptsCallWithIDPath build the rest.Opts structure with *a normalizedID (driveID#fileID, or simply fileID) and leaf*
// using url template https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route} (for international OneDrive)
// or https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
// and https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'") (for 21ViaNet)
// and https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'") (for Vnet Group)
// if isPath is false, this function will only work when the leaf is "" or a child name (i.e. it doesn't accept multi-level leaf)
// if isPath is true, multi-level leaf like a/b/c can be passed
func (f *Fs) newOptsCallWithIDPath(normalizedID string, leaf string, isPath bool, method string, route string) (opts rest.Opts, ok bool) {

@@ -1,3 +1,4 @@
// Package opendrive provides an interface to the OpenDrive storage system.
package opendrive

import (
@@ -340,9 +341,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -404,9 +405,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -562,7 +563,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -599,7 +600,7 @@ func (f *Fs) readMetaDataForFolderID(ctx context.Context, id string) (info *Fold

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -829,7 +830,6 @@ func (o *Object) Size() int64 {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {

@@ -13,7 +13,7 @@ const (
timeFormat = `"` + time.RFC1123Z + `"`
)

// Time represents represents date and time information for the
// Time represents date and time information for the
// pcloud API, by using RFC1123Z
type Time time.Time

@@ -588,7 +588,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -607,7 +607,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object into the container
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -681,9 +681,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -766,9 +766,9 @@ func (f *Fs) CleanUp(ctx context.Context) error {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1073,7 +1073,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1152,7 +1151,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -493,7 +493,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -512,7 +512,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -530,9 +530,9 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

// PutUnchecked the object into the container
//
// This will produce an error if the object already exists
// This will produce an error if the object already exists.
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -694,9 +694,9 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -870,7 +870,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -917,7 +916,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -230,7 +230,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
@@ -523,9 +523,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -562,9 +562,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//

@@ -261,7 +261,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -1,3 +1,4 @@
// Package putio provides an interface to the put.io storage system.
package putio

import (

@@ -1,9 +1,8 @@
// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/

//go:build !plan9 && !js
// +build !plan9,!js

// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
package qingstor

import (
@@ -431,9 +430,9 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -477,7 +476,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

// Return an Object from a path
//
//If it can't be found it returns the error ErrorObjectNotFound.
// If it can't be found it returns the error ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *qs.KeyType) (fs.Object, error) {
o := &Object{
fs: f,

101 backend/s3/gen_setfrom.go Normal file
@@ -0,0 +1,101 @@
// Generate boilerplate code for setting similar structs from each other

//go:build ignore
// +build ignore

package main

import (
"flag"
"fmt"
"io"
"log"
"os"
"reflect"
"strings"

"github.com/aws/aws-sdk-go/service/s3"
)

// flags
var (
outputFile = flag.String("o", "", "Output file name, stdout if unset")
)

// globals
var (
out io.Writer = os.Stdout
)

// genSetFrom generates code to set the public members of a from b
//
// a and b should be pointers to structs
//
// a can be a different type from b
//
// Only the Fields which have the same name and assignable type on a
// and b will be set.
//
// This is useful for copying between almost identical structures that
// are frequently present in auto-generated code for cloud storage
// interfaces.
func genSetFrom(a, b interface{}) {
name := fmt.Sprintf("setFrom_%T_%T", a, b)
name = strings.Replace(name, ".", "", -1)
name = strings.Replace(name, "*", "", -1)
fmt.Fprintf(out, "\n// %s copies matching elements from a to b\n", name)
fmt.Fprintf(out, "func %s(a %T, b %T) {\n", name, a, b)
ta := reflect.TypeOf(a).Elem()
tb := reflect.TypeOf(b).Elem()
va := reflect.ValueOf(a).Elem()
vb := reflect.ValueOf(b).Elem()
for i := 0; i < tb.NumField(); i++ {
bField := vb.Field(i)
tbField := tb.Field(i)
name := tbField.Name
aField := va.FieldByName(name)
taField, found := ta.FieldByName(name)
if found && aField.IsValid() && bField.IsValid() && aField.CanSet() && tbField.Type.AssignableTo(taField.Type) {
fmt.Fprintf(out, "\ta.%s = b.%s\n", name, name)
}
}
fmt.Fprintf(out, "}\n")
}

func main() {
flag.Parse()

if *outputFile != "" {
fd, err := os.Create(*outputFile)
if err != nil {
log.Fatal(err)
}
defer func() {
err := fd.Close()
if err != nil {
log.Fatal(err)
}
}()
out = fd
}

fmt.Fprintf(out, `// Code generated by "go run gen_setfrom.go"; DO NOT EDIT.

package s3

import "github.com/aws/aws-sdk-go/service/s3"
`)

genSetFrom(new(s3.ListObjectsInput), new(s3.ListObjectsV2Input))
genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectsOutput))
genSetFrom(new(s3.ListObjectVersionsInput), new(s3.ListObjectsV2Input))
genSetFrom(new(s3.ObjectVersion), new(s3.DeleteMarkerEntry))
genSetFrom(new(s3.ListObjectsV2Output), new(s3.ListObjectVersionsOutput))
genSetFrom(new(s3.Object), new(s3.ObjectVersion))
genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.HeadObjectOutput))
genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.CopyObjectInput))
genSetFrom(new(s3.UploadPartCopyInput), new(s3.CopyObjectInput))
genSetFrom(new(s3.HeadObjectOutput), new(s3.GetObjectOutput))
genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.PutObjectInput))
genSetFrom(new(s3.HeadObjectOutput), new(s3.PutObjectInput))
}
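A brief aside on how this generator is run: the only invocation confirmed by this diff is the marker it writes into its output ("go run gen_setfrom.go") and its -o flag, and the generated setters land in backend/s3/setfrom.go (shown later in this diff). So regenerating after an aws-sdk-go upgrade would plausibly look like the following sketch; the go:generate wiring is an assumption, not something visible in the diff:

// Hypothetical go:generate hook; only the "-o" flag and the
// "DO NOT EDIT" marker are confirmed by the code above.
//go:generate go run gen_setfrom.go -o setfrom.go

or, run by hand from the backend/s3 directory:

go run gen_setfrom.go -o setfrom.go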
928 backend/s3/s3.go (file diff suppressed because it is too large)
@@ -4,14 +4,19 @@ import (
"bytes"
"compress/gzip"
"context"
"crypto/md5"
"fmt"
"testing"
"time"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/version"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -26,9 +31,15 @@ func gz(t *testing.T, s string) string {
return buf.String()
}

func md5sum(t *testing.T, s string) string {
hash := md5.Sum([]byte(s))
return fmt.Sprintf("%x", hash)
}

func (f *Fs) InternalTestMetadata(t *testing.T) {
ctx := context.Background()
contents := gz(t, random.String(1000))
original := random.String(1000)
contents := gz(t, original)

item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
btime := time.Now()
@@ -65,6 +76,31 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
assert.Equal(t, v, got, k)
}
}

t.Run("GzipEncoding", func(t *testing.T) {
// Test that the gzipped file we uploaded can be
// downloaded with and without decompression
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
gotContents := fstests.ReadObject(ctx, t, o, -1)
assert.Equal(t, wantContents, gotContents)
assert.Equal(t, wantSize, o.Size())
gotHash, err := o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, wantHash, gotHash)
}

t.Run("NoDecompress", func(t *testing.T) {
checkDownload(contents, int64(len(contents)), md5sum(t, contents))
})
t.Run("Decompress", func(t *testing.T) {
f.opt.Decompress = true
defer func() {
f.opt.Decompress = false
}()
checkDownload(original, -1, "")
})

})
}

func (f *Fs) InternalTestNoHead(t *testing.T) {
@@ -80,13 +116,279 @@ func (f *Fs) InternalTestNoHead(t *testing.T) {
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
// PutTestcontests checks the received object
// PutTestcontents checks the received object

}

func TestVersionLess(t *testing.T) {
key1 := "key1"
key2 := "key2"
t1 := fstest.Time("2022-01-21T12:00:00+01:00")
t2 := fstest.Time("2022-01-21T12:00:01+01:00")
for n, test := range []struct {
a, b *s3.ObjectVersion
want bool
}{
{a: nil, b: nil, want: true},
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: nil, want: false},
{a: nil, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, want: false},
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t2}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, want: true},
{a: &s3.ObjectVersion{Key: &key2, LastModified: &t1}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: false},
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1}, want: true},
{a: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(false)}, b: &s3.ObjectVersion{Key: &key1, LastModified: &t1, IsLatest: aws.Bool(true)}, want: false},
} {
got := versionLess(test.a, test.b)
assert.Equal(t, test.want, got, fmt.Sprintf("%d: %+v", n, test))
}
}

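The table above pins the comparison down completely. As a reading aid, here is a minimal sketch of the ordering it implies; this is inferred from the test cases only, not the actual versionLess, which lives in backend/s3/s3.go whose diff is suppressed above. Read off: nil sorts first, then Key ascending, then LastModified newest first, then entries with IsLatest set sort first.

// versionLessSketch is a hypothetical reconstruction inferred from
// TestVersionLess above; it is not the real implementation.
func versionLessSketch(a, b *s3.ObjectVersion) bool {
	if a == nil || b == nil {
		return a == nil // nil sorts first; (nil, nil) compares as less
	}
	if ak, bk := aws.StringValue(a.Key), aws.StringValue(b.Key); ak != bk {
		return ak < bk // group all versions of a key together
	}
	at, bt := aws.TimeValue(a.LastModified), aws.TimeValue(b.LastModified)
	if !at.Equal(bt) {
		return at.After(bt) // newest version of a key sorts first
	}
	return aws.BoolValue(a.IsLatest) && !aws.BoolValue(b.IsLatest) // tie-break: IsLatest first
}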
func TestMergeDeleteMarkers(t *testing.T) {
key1 := "key1"
key2 := "key2"
t1 := fstest.Time("2022-01-21T12:00:00+01:00")
t2 := fstest.Time("2022-01-21T12:00:01+01:00")
for n, test := range []struct {
versions []*s3.ObjectVersion
markers []*s3.DeleteMarkerEntry
want []*s3.ObjectVersion
}{
{
versions: []*s3.ObjectVersion{},
markers: []*s3.DeleteMarkerEntry{},
want: []*s3.ObjectVersion{},
},
{
versions: []*s3.ObjectVersion{
{
Key: &key1,
LastModified: &t1,
},
},
markers: []*s3.DeleteMarkerEntry{},
want: []*s3.ObjectVersion{
{
Key: &key1,
LastModified: &t1,
},
},
},
{
versions: []*s3.ObjectVersion{},
markers: []*s3.DeleteMarkerEntry{
{
Key: &key1,
LastModified: &t1,
},
},
want: []*s3.ObjectVersion{
{
Key: &key1,
LastModified: &t1,
Size: isDeleteMarker,
},
},
},
{
versions: []*s3.ObjectVersion{
{
Key: &key1,
LastModified: &t2,
},
{
Key: &key2,
LastModified: &t2,
},
},
markers: []*s3.DeleteMarkerEntry{
{
Key: &key1,
LastModified: &t1,
},
},
want: []*s3.ObjectVersion{
{
Key: &key1,
LastModified: &t2,
},
{
Key: &key1,
LastModified: &t1,
Size: isDeleteMarker,
},
{
Key: &key2,
LastModified: &t2,
},
},
},
} {
got := mergeDeleteMarkers(test.versions, test.markers)
assert.Equal(t, test.want, got, fmt.Sprintf("%d: %+v", n, test))
}
}

func (f *Fs) InternalTestVersions(t *testing.T) {
ctx := context.Background()

// Enable versioning for this bucket during this test
_, err := f.setGetVersioning(ctx, "Enabled")
if err != nil {
t.Skipf("Couldn't enable versioning: %v", err)
}
defer func() {
// Disable versioning for this bucket
_, err := f.setGetVersioning(ctx, "Suspended")
assert.NoError(t, err)
}()

// Small pause to make the LastModified different since AWS
// only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)

// Create an object
const fileName = "test-versions.txt"
contents := random.String(100)
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()

// Small pause
time.Sleep(2 * time.Second)

// Remove it
assert.NoError(t, obj.Remove(ctx))

// Small pause to make the LastModified different since AWS only seems to track them to 1 second granularity
time.Sleep(2 * time.Second)

// And create it with different size and contents
newContents := random.String(101)
newItem := fstest.NewItem(fileName, newContents, fstest.Time("2002-05-06T04:05:06.499999999Z"))
newObj := fstests.PutTestContents(ctx, t, f, &newItem, newContents, true)

t.Run("Versions", func(t *testing.T) {
// Set --s3-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()

// Read the contents
entries, err := f.List(ctx, "")
require.NoError(t, err)
tests := 0
var fileNameVersion string
for _, entry := range entries {
remote := entry.Remote()
if remote == fileName {
t.Run("ReadCurrent", func(t *testing.T) {
assert.Equal(t, newContents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
tests++
} else if versionTime, p := version.Remove(remote); !versionTime.IsZero() && p == fileName {
t.Run("ReadVersion", func(t *testing.T) {
assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
})
assert.WithinDuration(t, obj.(*Object).lastModified, versionTime, time.Second, "object time must be with 1 second of version time")
fileNameVersion = remote
tests++
}
}
assert.Equal(t, 2, tests, "object missing from listing")

// Check we can read the object with a version suffix
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, fileNameVersion)
require.NoError(t, err)
require.NotNil(t, o)
assert.Equal(t, int64(100), o.Size(), o.Remote())
})
})

t.Run("VersionAt", func(t *testing.T) {
// We set --s3-version-at for this test so make sure we reset it at the end
defer func() {
f.opt.VersionAt = fs.Time{}
}()

var (
firstObjectTime = obj.(*Object).lastModified
secondObjectTime = newObj.(*Object).lastModified
)

for _, test := range []struct {
what string
at time.Time
want []fstest.Item
wantErr error
wantSize int64
}{
{
what: "Before",
at: firstObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterOne",
at: firstObjectTime.Add(time.Second),
want: append([]fstest.Item{item}, fstests.InternalTestFiles...),
wantSize: 100,
},
{
what: "AfterDelete",
at: secondObjectTime.Add(-time.Second),
want: fstests.InternalTestFiles,
wantErr: fs.ErrorObjectNotFound,
},
{
what: "AfterTwo",
at: secondObjectTime.Add(time.Second),
want: append([]fstest.Item{newItem}, fstests.InternalTestFiles...),
wantSize: 101,
},
} {
t.Run(test.what, func(t *testing.T) {
f.opt.VersionAt = fs.Time(test.at)
t.Run("List", func(t *testing.T) {
fstest.CheckListing(t, f, test.want)
})
t.Run("NewObject", func(t *testing.T) {
gotObj, gotErr := f.NewObject(ctx, fileName)
assert.Equal(t, test.wantErr, gotErr)
if gotErr == nil {
assert.Equal(t, test.wantSize, gotObj.Size())
}
})
})
}
})

t.Run("Cleanup", func(t *testing.T) {
require.NoError(t, f.CleanUpHidden(ctx))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
// Set --s3-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
fstest.CheckListing(t, f, items)
})

// Purge gets tested later
}

func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
t.Run("NoHead", f.InternalTestNoHead)
t.Run("Versions", f.InternalTestVersions)
}

var _ fstests.InternalTester = (*Fs)(nil)

239 backend/s3/setfrom.go Normal file
@@ -0,0 +1,239 @@
|
||||
// Code generated by "go run gen_setfrom.go"; DO NOT EDIT.
|
||||
|
||||
package s3
|
||||
|
||||
import "github.com/aws/aws-sdk-go/service/s3"
|
||||
|
||||
// setFrom_s3ListObjectsInput_s3ListObjectsV2Input copies matching elements from a to b
|
||||
func setFrom_s3ListObjectsInput_s3ListObjectsV2Input(a *s3.ListObjectsInput, b *s3.ListObjectsV2Input) {
|
||||
a.Bucket = b.Bucket
|
||||
a.Delimiter = b.Delimiter
|
||||
a.EncodingType = b.EncodingType
|
||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||
a.MaxKeys = b.MaxKeys
|
||||
a.Prefix = b.Prefix
|
||||
a.RequestPayer = b.RequestPayer
|
||||
}
|
||||
|
||||
// setFrom_s3ListObjectsV2Output_s3ListObjectsOutput copies matching elements from a to b
|
||||
func setFrom_s3ListObjectsV2Output_s3ListObjectsOutput(a *s3.ListObjectsV2Output, b *s3.ListObjectsOutput) {
|
||||
a.CommonPrefixes = b.CommonPrefixes
|
||||
a.Contents = b.Contents
|
||||
a.Delimiter = b.Delimiter
|
||||
a.EncodingType = b.EncodingType
|
||||
a.IsTruncated = b.IsTruncated
|
||||
a.MaxKeys = b.MaxKeys
|
||||
a.Name = b.Name
|
||||
a.Prefix = b.Prefix
|
||||
}
|
||||
|
||||
// setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input copies matching elements from a to b
|
||||
func setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input(a *s3.ListObjectVersionsInput, b *s3.ListObjectsV2Input) {
|
||||
a.Bucket = b.Bucket
|
||||
a.Delimiter = b.Delimiter
|
||||
a.EncodingType = b.EncodingType
|
||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||
a.MaxKeys = b.MaxKeys
|
||||
a.Prefix = b.Prefix
|
||||
}
|
||||
|
||||
// setFrom_s3ObjectVersion_s3DeleteMarkerEntry copies matching elements from a to b
|
||||
func setFrom_s3ObjectVersion_s3DeleteMarkerEntry(a *s3.ObjectVersion, b *s3.DeleteMarkerEntry) {
|
||||
a.IsLatest = b.IsLatest
|
||||
a.Key = b.Key
|
||||
a.LastModified = b.LastModified
|
||||
a.Owner = b.Owner
|
||||
a.VersionId = b.VersionId
|
||||
}
|
||||
|
||||
// setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput copies matching elements from a to b
|
||||
func setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(a *s3.ListObjectsV2Output, b *s3.ListObjectVersionsOutput) {
|
||||
a.CommonPrefixes = b.CommonPrefixes
|
||||
a.Delimiter = b.Delimiter
|
||||
a.EncodingType = b.EncodingType
|
||||
a.IsTruncated = b.IsTruncated
|
||||
a.MaxKeys = b.MaxKeys
|
||||
a.Name = b.Name
|
||||
a.Prefix = b.Prefix
|
||||
}
|
||||
|
||||
// setFrom_s3Object_s3ObjectVersion copies matching elements from a to b
|
||||
func setFrom_s3Object_s3ObjectVersion(a *s3.Object, b *s3.ObjectVersion) {
|
||||
a.ChecksumAlgorithm = b.ChecksumAlgorithm
|
||||
a.ETag = b.ETag
|
||||
a.Key = b.Key
|
||||
a.LastModified = b.LastModified
|
||||
a.Owner = b.Owner
|
||||
a.Size = b.Size
|
||||
a.StorageClass = b.StorageClass
|
||||
}
|
||||
|
||||
// setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput copies matching elements from a to b
|
||||
func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipartUploadInput, b *s3.HeadObjectOutput) {
|
||||
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ContentDisposition = b.ContentDisposition
|
||||
a.ContentEncoding = b.ContentEncoding
|
||||
a.ContentLanguage = b.ContentLanguage
|
||||
a.ContentType = b.ContentType
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
|
||||
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
|
||||
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
|
||||
a.SSEKMSKeyId = b.SSEKMSKeyId
|
||||
a.ServerSideEncryption = b.ServerSideEncryption
|
||||
a.StorageClass = b.StorageClass
|
||||
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
||||
}
|
||||
|
||||
// setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput copies matching elements from a to b
|
||||
func setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(a *s3.CreateMultipartUploadInput, b *s3.CopyObjectInput) {
|
||||
a.ACL = b.ACL
|
||||
a.Bucket = b.Bucket
|
||||
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ChecksumAlgorithm = b.ChecksumAlgorithm
|
||||
a.ContentDisposition = b.ContentDisposition
|
||||
a.ContentEncoding = b.ContentEncoding
|
||||
a.ContentLanguage = b.ContentLanguage
|
||||
a.ContentType = b.ContentType
|
||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||
a.Expires = b.Expires
|
||||
a.GrantFullControl = b.GrantFullControl
|
||||
a.GrantRead = b.GrantRead
|
||||
a.GrantReadACP = b.GrantReadACP
|
||||
a.GrantWriteACP = b.GrantWriteACP
|
||||
a.Key = b.Key
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
|
||||
a.RequestPayer = b.RequestPayer
|
||||
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
|
||||
a.SSECustomerKey = b.SSECustomerKey
|
||||
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
|
||||
a.SSEKMSEncryptionContext = b.SSEKMSEncryptionContext
|
||||
a.SSEKMSKeyId = b.SSEKMSKeyId
|
||||
a.ServerSideEncryption = b.ServerSideEncryption
|
||||
a.StorageClass = b.StorageClass
|
||||
a.Tagging = b.Tagging
|
||||
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
||||
}
|
||||
|
||||
// setFrom_s3UploadPartCopyInput_s3CopyObjectInput copies matching elements from a to b
|
||||
func setFrom_s3UploadPartCopyInput_s3CopyObjectInput(a *s3.UploadPartCopyInput, b *s3.CopyObjectInput) {
|
||||
a.Bucket = b.Bucket
|
||||
a.CopySource = b.CopySource
|
||||
a.CopySourceIfMatch = b.CopySourceIfMatch
|
||||
a.CopySourceIfModifiedSince = b.CopySourceIfModifiedSince
|
||||
a.CopySourceIfNoneMatch = b.CopySourceIfNoneMatch
|
||||
a.CopySourceIfUnmodifiedSince = b.CopySourceIfUnmodifiedSince
|
||||
a.CopySourceSSECustomerAlgorithm = b.CopySourceSSECustomerAlgorithm
|
||||
a.CopySourceSSECustomerKey = b.CopySourceSSECustomerKey
|
||||
a.CopySourceSSECustomerKeyMD5 = b.CopySourceSSECustomerKeyMD5
|
||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||
a.ExpectedSourceBucketOwner = b.ExpectedSourceBucketOwner
|
||||
a.Key = b.Key
|
||||
a.RequestPayer = b.RequestPayer
|
||||
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
|
||||
a.SSECustomerKey = b.SSECustomerKey
|
||||
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
|
||||
}
|
||||
|
||||
// setFrom_s3HeadObjectOutput_s3GetObjectOutput copies matching elements from a to b
|
||||
func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.GetObjectOutput) {
|
||||
a.AcceptRanges = b.AcceptRanges
|
||||
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ChecksumCRC32 = b.ChecksumCRC32
|
||||
a.ChecksumCRC32C = b.ChecksumCRC32C
|
||||
a.ChecksumSHA1 = b.ChecksumSHA1
|
||||
a.ChecksumSHA256 = b.ChecksumSHA256
|
||||
a.ContentDisposition = b.ContentDisposition
a.ContentEncoding = b.ContentEncoding
a.ContentLanguage = b.ContentLanguage
a.ContentLength = b.ContentLength
a.ContentType = b.ContentType
a.DeleteMarker = b.DeleteMarker
a.ETag = b.ETag
a.Expiration = b.Expiration
a.Expires = b.Expires
a.LastModified = b.LastModified
a.Metadata = b.Metadata
a.MissingMeta = b.MissingMeta
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
a.ObjectLockMode = b.ObjectLockMode
a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
a.PartsCount = b.PartsCount
a.ReplicationStatus = b.ReplicationStatus
a.RequestCharged = b.RequestCharged
a.Restore = b.Restore
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
a.SSEKMSKeyId = b.SSEKMSKeyId
a.ServerSideEncryption = b.ServerSideEncryption
a.StorageClass = b.StorageClass
a.VersionId = b.VersionId
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
}

// setFrom_s3CreateMultipartUploadInput_s3PutObjectInput copies matching elements from a to b
func setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(a *s3.CreateMultipartUploadInput, b *s3.PutObjectInput) {
a.ACL = b.ACL
a.Bucket = b.Bucket
a.BucketKeyEnabled = b.BucketKeyEnabled
a.CacheControl = b.CacheControl
a.ChecksumAlgorithm = b.ChecksumAlgorithm
a.ContentDisposition = b.ContentDisposition
a.ContentEncoding = b.ContentEncoding
a.ContentLanguage = b.ContentLanguage
a.ContentType = b.ContentType
a.ExpectedBucketOwner = b.ExpectedBucketOwner
a.Expires = b.Expires
a.GrantFullControl = b.GrantFullControl
a.GrantRead = b.GrantRead
a.GrantReadACP = b.GrantReadACP
a.GrantWriteACP = b.GrantWriteACP
a.Key = b.Key
a.Metadata = b.Metadata
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
a.ObjectLockMode = b.ObjectLockMode
a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
a.RequestPayer = b.RequestPayer
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
a.SSECustomerKey = b.SSECustomerKey
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
a.SSEKMSEncryptionContext = b.SSEKMSEncryptionContext
a.SSEKMSKeyId = b.SSEKMSKeyId
a.ServerSideEncryption = b.ServerSideEncryption
a.StorageClass = b.StorageClass
a.Tagging = b.Tagging
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
}

// setFrom_s3HeadObjectOutput_s3PutObjectInput copies matching elements from a to b
func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.PutObjectInput) {
a.BucketKeyEnabled = b.BucketKeyEnabled
a.CacheControl = b.CacheControl
a.ChecksumCRC32 = b.ChecksumCRC32
a.ChecksumCRC32C = b.ChecksumCRC32C
a.ChecksumSHA1 = b.ChecksumSHA1
a.ChecksumSHA256 = b.ChecksumSHA256
a.ContentDisposition = b.ContentDisposition
a.ContentEncoding = b.ContentEncoding
a.ContentLanguage = b.ContentLanguage
a.ContentLength = b.ContentLength
a.ContentType = b.ContentType
a.Metadata = b.Metadata
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
a.ObjectLockMode = b.ObjectLockMode
a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
a.SSEKMSKeyId = b.SSEKMSKeyId
a.ServerSideEncryption = b.ServerSideEncryption
a.StorageClass = b.StorageClass
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
}
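The setFrom_* helpers above appear to be mechanically generated field-copiers; note that each assignment fills a from b. A minimal hand-written sketch of the same pattern, using hypothetical stand-in structs rather than the real AWS SDK types:

package main

import "fmt"

// Hypothetical stand-ins for the SDK structs used above.
type headObjectOutput struct {
	ContentType  *string
	StorageClass *string
}

type putObjectInput struct {
	ContentType  *string
	StorageClass *string
}

// setFrom_headObjectOutput_putObjectInput copies the fields the two
// structs share from b into a, mirroring the generated helpers.
func setFrom_headObjectOutput_putObjectInput(a *headObjectOutput, b *putObjectInput) {
	a.ContentType = b.ContentType
	a.StorageClass = b.StorageClass
}

func main() {
	ct := "application/pdf"
	a, b := &headObjectOutput{}, &putObjectInput{ContentType: &ct}
	setFrom_headObjectOutput_putObjectInput(a, b)
	fmt.Println(*a.ContentType) // prints: application/pdf
}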
@@ -1,3 +1,4 @@
// Package api provides types used by the Seafile API.
package api

// Some api objects are duplicated with only small differences,

@@ -1,3 +1,4 @@
// Package seafile provides an interface to the Seafile storage system.
package seafile

import (
@@ -136,7 +137,7 @@ type Fs struct {
features *fs.Features // optional features
endpoint *url.URL // URL of the host
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
pacer *fs.Pacer // pacer for API calls
authMu sync.Mutex // Mutex to protect library decryption
createDirMutex sync.Mutex // Protect creation of directories
@@ -671,9 +672,9 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) e

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
@@ -722,9 +723,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {

@@ -1,8 +1,7 @@
// Package sftp provides a filesystem interface using github.com/pkg/sftp

//go:build !plan9
// +build !plan9

// Package sftp provides a filesystem interface using github.com/pkg/sftp
package sftp

import (
@@ -276,19 +275,24 @@ Set to 0 to keep connections indefinitely.
Name: "chunk_size",
Help: `Upload and download chunk size.

This controls the maximum packet size used in the SFTP protocol. The
RFC limits this to 32768 bytes (32k), however a lot of servers
support larger sizes and setting it larger will increase transfer
speed dramatically on high latency links.
This controls the maximum size of payload in SFTP protocol packets.
The RFC limits this to 32768 bytes (32k), which is the default. However,
a lot of servers support larger sizes, typically limited to a maximum
total package size of 256k, and setting it larger will increase transfer
speed dramatically on high latency links. This includes OpenSSH, and,
for example, using the value of 255k works well, leaving plenty of room
for overhead while still being within a total packet size of 256k.

Only use a setting higher than 32k if you always connect to the same
server or after sufficiently broad testing.

For example using the value of 252k with OpenSSH works well with its
maximum packet size of 256k.

If you get the error "failed to send packet header: EOF" when copying
a large file, try lowering this number.
Make sure to test thoroughly before using a value higher than 32k,
and only use it if you always connect to the same server or after
sufficiently broad testing. If you get errors such as
"failed to send packet payload: EOF", lots of "connection lost",
or "corrupted on transfer", when copying a larger file, try lowering
the value. The server run by [rclone serve sftp](/commands/rclone_serve_sftp)
sends packets with standard 32k maximum payload so you must not
set a different chunk_size when downloading files, but it accepts
packets up to the 256k total size, so for uploads the chunk_size
can be set as for the OpenSSH example above.
`,
Default: 32 * fs.Kibi,
Advanced: true,
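As a usage illustration of the guidance above (assuming the usual rclone convention that the chunk_size backend option surfaces as the --sftp-chunk-size flag), an upload to an OpenSSH server might raise the payload size like this, while downloads from an rclone serve sftp server should keep the 32k default:

rclone copy --sftp-chunk-size 255k /local/dir remote:dir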

@@ -741,7 +741,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -760,7 +760,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -783,9 +783,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

// PutUnchecked the object into the container
//
// This will produce an error if the object already exists
// This will produce an error if the object already exists.
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -973,9 +973,9 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1043,9 +1043,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1256,7 +1256,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1324,7 +1323,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -1,3 +1,4 @@
// Package api provides types used by the Sia API.
package api

import (

@@ -1,3 +1,4 @@
// Package sia provides an interface to the Sia storage system.
package sia

import (

@@ -683,9 +683,9 @@ func newPrefix(prefix string) string {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//

@@ -200,7 +200,7 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
m configmap.Mapper // config file access
@@ -713,7 +713,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
// Returns the object, leaf, directoryID and error.
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -732,7 +732,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -755,9 +755,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

// PutUnchecked the object into the container
//
// This will produce an error if the object already exists
// This will produce an error if the object already exists.
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -852,9 +852,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -985,9 +985,9 @@ func (f *Fs) moveDir(ctx context.Context, id, leaf, directoryID string) (err err

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1156,7 +1156,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1229,7 +1228,7 @@ func (f *Fs) createFile(ctx context.Context, pathID, leaf, mimeType string) (new

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -790,7 +790,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

// Put the object into the container
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -902,9 +902,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1019,7 +1019,7 @@ func copyLargeObject(ctx context.Context, f *Fs, src *Object, dstContainer strin
return err
}

//remove copied segments when copy process failed
// remove copied segments when copy process failed
func handleCopyFail(ctx context.Context, f *Fs, segmentsContainer string, segments []string, err error) {
fs.Debugf(f, "handle copy segment fail")
if err == nil {
@@ -1140,10 +1140,11 @@ func (o *Object) Size() int64 {
// decodeMetaData sets the metadata in the object from a swift.Object
//
// Sets
// o.lastModified
// o.size
// o.md5
// o.contentType
//
// o.lastModified
// o.size
// o.md5
// o.contentType
func (o *Object) decodeMetaData(info *swift.Object) (err error) {
o.lastModified = info.LastModified
o.size = info.Bytes
@@ -1184,7 +1185,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {

@@ -16,8 +16,8 @@ func TestInternalUrlEncode(t *testing.T) {
want string
}{
{"", ""},
{"abcdefghijklmopqrstuvwxyz", "abcdefghijklmopqrstuvwxyz"},
{"ABCDEFGHIJKLMOPQRSTUVWXYZ", "ABCDEFGHIJKLMOPQRSTUVWXYZ"},
{"abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"},
{"ABCDEFGHIJKLMNOPQRSTUVWXYZ", "ABCDEFGHIJKLMNOPQRSTUVWXYZ"},
{"0123456789", "0123456789"},
{"abc/ABC/123", "abc/ABC/123"},
{"   ", "%20%20%20"},

@@ -1,3 +1,4 @@
// Package policy provides utilities for the union implementation.
package policy

import (

@@ -1,3 +1,4 @@
// Package union implements a virtual provider to join existing remotes.
package union

import (
@@ -213,9 +214,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -253,9 +254,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//

@@ -1,3 +1,4 @@
// Package upstream provides utility functionality to union.
package upstream

import (

@@ -1,3 +1,4 @@
// Package api provides types used by the Uptobox API.
package api

import "fmt"
@@ -79,7 +80,7 @@ type UploadInfo struct {
} `json:"data"`
}

// UploadResponse is the respnse to a successful upload
// UploadResponse is the response to a successful upload
type UploadResponse struct {
Files []struct {
Name string `json:"name"`

@@ -1,3 +1,4 @@
// Package uptobox provides an interface to the Uptobox storage system.
package uptobox

import (
@@ -162,7 +163,7 @@ func (f *Fs) splitPathFull(pth string) (string, string) {
return "//" + fullPath[:i], fullPath[i+1:]
}

// splitPath is modified splitPath version that doesn't include the seperator
// splitPath is modified splitPath version that doesn't include the separator
// in the base path
func (f *Fs) splitPath(pth string) (string, string) {
// chop of any leading or trailing '/'
@@ -478,7 +479,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
// yes it does take take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :(
// yes it does take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :(

// create upload request
opts := rest.Opts{
@@ -756,7 +757,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if err != nil {
return fmt.Errorf("dirmove: source not found: %w", err)
}
// check if the destination allready exists
// check if the destination already exists
dstPath := f.dirPath(dstRemote)
_, err = f.readMetaDataForPath(ctx, dstPath, &api.MetadataRequestOptions{Limit: 1})
if err == nil {
@@ -781,7 +782,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
needMove := srcBase != dstBase

// if we have to rename we'll have to use a temporary name since
// there could allready be a directory with the same name as the src directory
// there could already be a directory with the same name as the src directory
if needRename {
// rename to a temporary name
tmpName := "rcloneTemp" + random.String(8)
@@ -991,7 +992,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {

@@ -37,27 +37,29 @@ type Response struct {
// This is a lazy way of decoding the multiple <s:propstat> in the
// response.
//
// The response might look like this
// The response might look like this.
//
// <d:response>
// <d:href>/remote.php/webdav/Nextcloud%20Manual.pdf</d:href>
// <d:propstat>
// <d:prop>
// <d:getlastmodified>Tue, 19 Dec 2017 22:02:36 GMT</d:getlastmodified>
// <d:getcontentlength>4143665</d:getcontentlength>
// <d:resourcetype/>
// <d:getetag>"048d7be4437ff7deeae94db50ff3e209"</d:getetag>
// <d:getcontenttype>application/pdf</d:getcontenttype>
// </d:prop>
// <d:status>HTTP/1.1 200 OK</d:status>
// </d:propstat>
// <d:propstat>
// <d:prop>
// <d:quota-used-bytes/>
// <d:quota-available-bytes/>
// </d:prop>
// <d:status>HTTP/1.1 404 Not Found</d:status>
// </d:propstat>
//
// <d:href>/remote.php/webdav/Nextcloud%20Manual.pdf</d:href>
// <d:propstat>
// <d:prop>
// <d:getlastmodified>Tue, 19 Dec 2017 22:02:36 GMT</d:getlastmodified>
// <d:getcontentlength>4143665</d:getcontentlength>
// <d:resourcetype/>
// <d:getetag>"048d7be4437ff7deeae94db50ff3e209"</d:getetag>
// <d:getcontenttype>application/pdf</d:getcontenttype>
// </d:prop>
// <d:status>HTTP/1.1 200 OK</d:status>
// </d:propstat>
// <d:propstat>
// <d:prop>
// <d:quota-used-bytes/>
// <d:quota-available-bytes/>
// </d:prop>
// <d:status>HTTP/1.1 404 Not Found</d:status>
// </d:propstat>
//
// </d:response>
//
// So we elide the array of <d:propstat> and within that the array of
@@ -127,8 +129,10 @@ type PropValue struct {
// Error is used to describe webdav errors
//
// <d:error xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns">
// <s:exception>Sabre\DAV\Exception\NotFound</s:exception>
// <s:message>File with name Photo could not be located</s:message>
//
// <s:exception>Sabre\DAV\Exception\NotFound</s:exception>
// <s:message>File with name Photo could not be located</s:message>
//
// </d:error>
type Error struct {
Exception string `xml:"exception,omitempty"`
@@ -214,16 +218,18 @@ func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
// Quota is used to read the bytes used and available
//
// <d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns">
// <d:response>
// <d:href>/remote.php/webdav/</d:href>
// <d:propstat>
// <d:prop>
// <d:quota-available-bytes>-3</d:quota-available-bytes>
// <d:quota-used-bytes>376461895</d:quota-used-bytes>
// </d:prop>
// <d:status>HTTP/1.1 200 OK</d:status>
// </d:propstat>
// </d:response>
//
// <d:response>
// <d:href>/remote.php/webdav/</d:href>
// <d:propstat>
// <d:prop>
// <d:quota-available-bytes>-3</d:quota-available-bytes>
// <d:quota-used-bytes>376461895</d:quota-used-bytes>
// </d:prop>
// <d:status>HTTP/1.1 200 OK</d:status>
// </d:propstat>
// </d:response>
//
// </d:multistatus>
type Quota struct {
Available string `xml:"DAV: response>propstat>prop>quota-available-bytes"`
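To make the propstat handling above concrete, here is a minimal, self-contained sketch, with simplified hypothetical types rather than rclone's real ones, showing how encoding/xml gathers the repeated <d:propstat> elements so the caller can pick out the 200 OK entry:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// Simplified stand-ins for the real webdav api types.
type propstat struct {
	Status string `xml:"DAV: status"`
	Length int64  `xml:"DAV: prop>getcontentlength"`
}

type response struct {
	Href     string     `xml:"DAV: href"`
	Propstat []propstat `xml:"DAV: propstat"`
}

func main() {
	const body = `<d:response xmlns:d="DAV:">
  <d:href>/remote.php/webdav/Nextcloud%20Manual.pdf</d:href>
  <d:propstat>
    <d:prop><d:getcontentlength>4143665</d:getcontentlength></d:prop>
    <d:status>HTTP/1.1 200 OK</d:status>
  </d:propstat>
  <d:propstat>
    <d:prop><d:quota-used-bytes/></d:prop>
    <d:status>HTTP/1.1 404 Not Found</d:status>
  </d:propstat>
</d:response>`
	var r response
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&r); err != nil {
		panic(err)
	}
	// Only the propstat whose status is 200 OK carries usable properties.
	for _, p := range r.Propstat {
		if strings.Contains(p.Status, "200") {
			fmt.Printf("%s is %d bytes\n", r.Href, p.Length)
		}
	}
}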

@@ -148,7 +148,7 @@ type Fs struct {
features *fs.Features // optional features
endpoint *url.URL // URL of the host
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
pacer *fs.Pacer // pacer for API calls
precision time.Duration // mod time precision
canStream bool // set if can stream
@@ -800,7 +800,7 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -975,9 +975,9 @@ func (f *Fs) Precision() time.Duration {

// Copy or Move src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1029,9 +1029,9 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1051,9 +1051,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1291,7 +1291,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -1,3 +1,4 @@
// Package api provides types used by the Yandex API.
package api

import (
@@ -20,7 +21,7 @@ type ResourceInfoRequestOptions struct {
Fields []string
}

//ResourceInfoResponse struct is returned by the API for metadata requests.
// ResourceInfoResponse struct is returned by the API for metadata requests.
type ResourceInfoResponse struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`
@@ -61,7 +62,7 @@ type AsyncStatus struct {
Status string `json:"status"`
}

//CustomPropertyResponse struct we send and is returned by the API for CustomProperty request.
// CustomPropertyResponse struct we send and is returned by the API for CustomProperty request.
type CustomPropertyResponse struct {
CustomProperties map[string]interface{} `json:"custom_properties"`
}

@@ -1,3 +1,4 @@
// Package yandex provides an interface to the Yandex storage system.
package yandex

import (
@@ -30,7 +31,7 @@ import (
"golang.org/x/oauth2"
)

//oAuth
// oAuth
const (
rcloneClientID = "ac39b43b9eba4cae8ffb788c06d816a8"
rcloneEncryptedClientSecret = "EfyyNZ3YUEwXM5yAhi72G9YwKn2mkFrYwJNS7cY0TJAhFlX9K-uJFbGlpO-RYjrJ"
@@ -442,7 +443,7 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -693,9 +694,9 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -723,9 +724,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1109,7 +1110,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT

// Update the already existing object
//
// Copy the reader into the object updating modTime and size
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {

@@ -1,3 +1,4 @@
// Package api provides types used by the Zoho API.
package api

import (
@@ -5,7 +6,7 @@ import (
"time"
)

// Time represents represents date and time information for Zoho
// Time represents date and time information for Zoho
// Zoho uses milliseconds since unix epoch (Java currentTimeMillis)
type Time time.Time
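Since Zoho's timestamps are plain milliseconds since the Unix epoch, converting them is a one-liner in Go 1.17+. A minimal sketch with an example value (not the backend's actual UnmarshalJSON):

package main

import (
	"fmt"
	"time"
)

func main() {
	var ms int64 = 1666000000000 // example Zoho-style millisecond timestamp
	t := time.UnixMilli(ms)      // milliseconds since the epoch -> time.Time
	fmt.Println(t.UTC().Format(time.RFC3339))
	fmt.Println(t.UnixMilli() == ms) // round-trips: true
}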


@@ -150,8 +150,8 @@ func init() {
return workspace.ID, workspace.Attributes.Name
})
case "workspace_end":
worksspaceID := config.Result
m.Set(configRootID, worksspaceID)
workspaceID := config.Result
m.Set(configRootID, workspaceID)
return nil, nil
}
return nil, fmt.Errorf("unknown state %q", config.State)
@@ -206,7 +206,7 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
}
@@ -644,7 +644,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, size int64, modTim

// Put the object
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -714,9 +714,9 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,

// PutUnchecked the object into the container
//
// This will produce an error if the object already exists
// This will produce an error if the object already exists.
//
// Copy the reader in to the new object which is returned
// Copy the reader in to the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -857,9 +857,9 @@ func (f *Fs) rename(ctx context.Context, id, name string) (item *api.Item, err e

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -960,9 +960,9 @@ func (f *Fs) move(ctx context.Context, srcID, parentID string) (item *api.Item,

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1152,7 +1152,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {

// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1236,7 +1235,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
// If existing is set then it updates the object rather than creating a new one.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
@@ -1265,7 +1264,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}

// upload was successfull, need to delete old object before rename
// upload was successful, need to delete old object before rename
if err = o.Remove(ctx); err != nil {
return fmt.Errorf("failed to remove old object: %w", err)
}

@@ -1,3 +1,4 @@
// Package about provides the about command.
package about

import (

@@ -1,3 +1,4 @@
// Package authorize provides the authorize command.
package authorize

import (

@@ -1,3 +1,4 @@
// Package backend provides the backend command.
package backend

import (

@@ -56,7 +56,8 @@ const (

// logReplacements make modern test logs comparable with golden dir.
// It is a string slice of even length with this structure:
// {`matching regular expression`, "mangled result string", ...}
//
// {`matching regular expression`, "mangled result string", ...}
var logReplacements = []string{
// skip syslog facility markers
`^(<[1-9]>)(INFO |ERROR |NOTICE|DEBUG ):(.*)$`, "$2:$3",
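A minimal sketch of how such an even-length {pattern, replacement, ...} slice can be applied, with a hypothetical mangle helper rather than bisync's actual test harness:

package main

import (
	"fmt"
	"regexp"
)

var logReplacements = []string{
	// skip syslog facility markers, keeping level and message
	`^(<[1-9]>)(INFO |ERROR |NOTICE|DEBUG ):(.*)$`, "$2:$3",
}

// mangle applies each pattern/replacement pair in order.
func mangle(line string) string {
	for i := 0; i < len(logReplacements); i += 2 {
		re := regexp.MustCompile(logReplacements[i])
		line = re.ReplaceAllString(line, logReplacements[i+1])
	}
	return line
}

func main() {
	fmt.Println(mangle("<6>INFO : copied file")) // prints: INFO : copied file
}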

@@ -290,7 +290,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
return
}

// exccessDeletes checks whether number of deletes is within allowed range
// excessDeletes checks whether number of deletes is within allowed range
func (ds *deltaSet) excessDeletes() bool {
maxDelete := ds.opt.MaxDelete
maxRatio := float64(maxDelete) / 100.0

@@ -15,7 +15,7 @@ func makeHelp(help string) string {
return replacer.Replace(help)
}

var shortHelp = `Perform bidirectonal synchronization between two paths.`
var shortHelp = `Perform bidirectional synchronization between two paths.`

var rcHelp = makeHelp(`This takes the following parameters


@@ -24,8 +24,8 @@ const ListingHeader = "# bisync listing v1 from"

// lineRegex and lineFormat define listing line format
//
// flags <- size -> <- hash -> id <------------ modtime -----------> "<----- remote"
// - 3009805 md5:xxxxxx - 2006-01-02T15:04:05.000000000-0700 "12 - Wait.mp3"
// flags <- size -> <- hash -> id <------------ modtime -----------> "<----- remote"
// - 3009805 md5:xxxxxx - 2006-01-02T15:04:05.000000000-0700 "12 - Wait.mp3"
//
// flags: "-" for a file and "d" for a directory (reserved)
// hash: "type:value" or "-" (example: "md5:378840336ab14afa9c6b8d887e68a340")
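A hedged sketch of parsing that listing format (an illustrative regular expression, not bisync's actual lineRegex):

package main

import (
	"fmt"
	"regexp"
)

// Fields: flags, size, hash, id, modtime, quoted remote.
var lineRe = regexp.MustCompile(
	`^(\S) +(-?\d+) +(\S+) +(\S+) +(\S+) +"(.+)"$`)

func main() {
	line := `- 3009805 md5:378840336ab14afa9c6b8d887e68a340 - 2006-01-02T15:04:05.000000000-0700 "12 - Wait.mp3"`
	if m := lineRe.FindStringSubmatch(line); m != nil {
		fmt.Printf("size=%s hash=%s remote=%q\n", m[2], m[3], m[6])
	}
}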

@@ -1,6 +1,7 @@
//go:build !plan9 && !js
// +build !plan9,!js

// Package cachestats provides the cachestats command.
package cachestats

import (

@@ -1,3 +1,4 @@
// Package cat provides the cat command.
package cat

import (

@@ -1,3 +1,4 @@
// Package check provides the check command.
package check

import (

@@ -1,3 +1,4 @@
// Package checksum provides the checksum command.
package checksum

import (

@@ -1,3 +1,4 @@
// Package cleanup provides the cleanup command.
package cleanup

import (

@@ -1,11 +1,10 @@
// Package cmount implements a FUSE mounting system for rclone remotes.
//
// This uses the cgo based cgofuse library

//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
// +build cmount
// +build linux,cgo darwin,cgo freebsd,cgo windows

// Package cmount implements a FUSE mounting system for rclone remotes.
//
// This uses the cgo based cgofuse library
package cmount

import (

Some files were not shown because too many files have changed in this diff