1
0
mirror of https://github.com/rclone/rclone.git synced 2026-01-28 15:23:26 +00:00

Compare commits

..

13 Commits

Author SHA1 Message Date
Anagh Kumar Baranwal
2804f5068a build: cache go build & test
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2025-07-22 01:21:52 +05:30
albertony
d1ac6c2fe1 build: limit check for edits of autogenerated files to only commits in a pull request 2025-07-17 16:20:38 +02:00
albertony
da9c99272c build: extend check for edits of autogenerated files to all commits in a pull request 2025-07-17 16:20:38 +02:00
Sudipto Baral
9c7594d78f smb: refresh Kerberos credentials when ccache file changes
This change enhances the SMB backend in Rclone to automatically refresh
Kerberos credentials when the associated ccache file is updated.

Previously, credentials were only loaded once per path and cached
indefinitely, which caused issues when service tickets expired or the
cache was renewed on the server.
2025-07-17 14:34:44 +01:00
Albin Parou
70226cc653 s3: fix multipart upload and server side copy when using bucket policy SSE-C
When uploading or moving data within an s3-compatible bucket, the
`SSECustomer*` headers should always be forwarded: on
`CreateMultipartUpload`, `UploadPart`, `UploadCopyPart` and
`CompleteMultipartUpload`. But currently rclone doesn't forward those
headers to `CompleteMultipartUpload`.

This is a requirement if you want to enforce `SSE-C` at the bucket level
via a bucket policy. Cf: `This parameter is required only when the
object was created using a checksum algorithm or if your bucket policy
requires the use of SSE-C.` in
https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
2025-07-17 14:29:31 +01:00
liubingrun
c20e4bd99c backend/s3: Fix memory leak by cloning strings #8683
This commit addresses a potential memory leak in the S3 backend where
strings extracted from large API responses were keeping the entire
response in memory. The issue occurs because Go strings share underlying
memory with their source, preventing garbage collection of large XML
responses even when only small substrings are needed.

Signed-off-by: liubingrun <liubr1@chinatelecom.cn>
2025-07-17 12:31:52 +01:00
Nick Craig-Wood
ccfe153e9b purge: exit with a fatal error if filters are set on rclone purge
Fixes #8491
2025-07-17 11:17:08 +01:00
Nick Craig-Wood
c9730bcaaf docs: Add Backblaze as a Platinum sponsor 2025-07-17 11:17:08 +01:00
Nick Craig-Wood
03dd7486c1 Add Sam Pegg to contributors 2025-07-17 11:17:08 +01:00
raider13209
6249009fdf googlephotos: added warning for Google Photos compatibility - fixes #8672 2025-07-17 10:48:12 +01:00
Nick Craig-Wood
8e2d76459f test: remove flaky TestChunkerChunk50bYandex test 2025-07-16 16:39:57 +01:00
albertony
5e539c6a72 docs: Consolidate entries for Josh Soref in contributors 2025-07-13 14:05:45 +02:00
albertony
8866112400 docs: remove dead link to example of writing a plugin 2025-07-13 13:51:38 +02:00
23 changed files with 753 additions and 212 deletions

View File

@@ -283,7 +283,7 @@ jobs:
run: govulncheck ./...
- name: Scan edits of autogenerated files
run: bin/check_autogenerated_edits.py
run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
if: github.event_name == 'pull_request'
android:

212
.github/workflows/build_android.yml vendored Normal file
View File

@@ -0,0 +1,212 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build_android.yml" -*-
name: Build & Push Android Builds
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
cancel-in-progress: true
# Trigger the workflow on push or pull request
on:
push:
branches:
- '**'
tags:
- '**'
pull_request:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
android:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- job_name: android-all
platform: linux/amd64/android/go1.24
os: ubuntu-latest
go: '>=1.24.0-rc.1'
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Install Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
check-latest: true
cache: false
- name: Set Environment Variables
shell: bash
run: |
echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_ENV
echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_ENV
echo "VERSION=$(make version)" >> $GITHUB_ENV
- name: Set PLATFORM Variable
shell: bash
run: |
platform=${{ matrix.platform }}
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
- name: Get ImageOS
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v7
with:
result-encoding: string
script: |
return process.env.ImageOS
- name: Set CACHE_PREFIX Variable
shell: bash
run: |
cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}
echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV
- name: Load Go Module Cache
uses: actions/cache@v4
with:
path: |
${{ env.GOMODCACHE }}
key: ${{ env.CACHE_PREFIX }}-modcache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ env.CACHE_PREFIX }}-modcache
# Both load & update the cache when on default branch
- name: Load Go Build & Test Cache
id: go-cache
uses: actions/cache@v4
if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request'
with:
path: |
${{ env.GOCACHE }}
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache
# Only load the cache when not on default branch
- name: Load Go Build & Test Cache
id: go-cache-restore
uses: actions/cache/restore@v4
if: github.ref_name != github.event.repository.default_branch || github.event_name == 'pull_request'
with:
path: |
${{ env.GOCACHE }}
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache
- name: Build Native rclone
shell: bash
run: |
make
- name: Install gomobile
shell: bash
run: |
go install golang.org/x/mobile/cmd/gobind@latest
go install golang.org/x/mobile/cmd/gomobile@latest
env PATH=$PATH:~/go/bin gomobile init
echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
- name: arm-v7a - gomobile build
shell: bash
run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm-v7a - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
- name: arm64-v8a - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
- name: x86 - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
- name: x64 - Set Environment Variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 - Build
shell: bash
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
- name: Delete Existing Cache
continue-on-error: true
shell: bash
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
for cache_id in "${cache_ids[@]}"; do
echo "Deleting Cache: $cache_id"
gh cache delete "$cache_id"
done
if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request' && steps.go-cache.outputs.cache-hit != 'true'
- name: Deploy Built Binaries
shell: bash
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'

View File

@@ -4,6 +4,10 @@
name: Build & Push Docker Images
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
cancel-in-progress: true
# Trigger the workflow on push or pull request
on:
push:
@@ -41,32 +45,26 @@ jobs:
runs-on: ${{ matrix.runs-on }}
steps:
- name: Free Space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set REPO_NAME Variable
shell: bash
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
- name: Set PLATFORM Variable
shell: bash
run: |
platform=${{ matrix.platform }}
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
- name: Set CACHE_NAME Variable
shell: python
env:
GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
run: |
import os, re
@@ -82,8 +80,11 @@ jobs:
ref_name_slug = "cache"
if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
if os.environ.get("GITHUB_REF_NAME"):
if os.environ['GITHUB_EVENT_NAME'] == "pull_request":
ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
elif os.environ['GITHUB_REF_NAME'] != os.environ['GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH']:
ref_name_slug += "-ref-" + slugify(os.environ['GITHUB_REF_NAME'])
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"CACHE_NAME={ref_name_slug}\n")
@@ -98,6 +99,12 @@ jobs:
script: |
return process.env.ImageOS
- name: Set CACHE_PREFIX Variable
shell: bash
run: |
cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}-docker-go
echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV
- name: Extract Metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
@@ -130,22 +137,35 @@ jobs:
- name: Load Go Build Cache for Docker
id: go-cache
uses: actions/cache@v4
if: github.ref_name == github.event.repository.default_branch
with:
key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
# Cache only the go builds, the module download is cached via the docker layer caching
path: |
go-build-cache
/tmp/go-build-cache
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache
- name: Load Go Build Cache for Docker
id: go-cache-restore
uses: actions/cache/restore@v4
if: github.ref_name != github.event.repository.default_branch
with:
# Cache only the go builds, the module download is cached via the docker layer caching
path: |
/tmp/go-build-cache
key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ env.CACHE_PREFIX }}-cache
- name: Inject Go Build Cache into Docker
uses: reproducible-containers/buildkit-cache-dance@v3
with:
cache-map: |
{
"go-build-cache": "/root/.cache/go-build"
"/tmp/go-build-cache": "/root/.cache/go-build"
}
skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}
skip-extraction: ${{ steps.go-cache.outputs.cache-hit || steps.go-cache-restore.outputs.cache-hit }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
@@ -172,9 +192,10 @@ jobs:
outputs: |
type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
cache-from: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }}
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-cache
cache-to: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }},image-manifest=true,mode=max,compression=zstd
- name: Export Image Digest
run: |
@@ -190,6 +211,19 @@ jobs:
retention-days: 1
if-no-files-found: error
- name: Delete Existing Cache
if: github.ref_name == github.event.repository.default_branch && steps.go-cache.outputs.cache-hit != 'true'
continue-on-error: true
shell: bash
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
for cache_id in "${cache_ids[@]}"; do
echo "Deleting Cache: $cache_id"
gh cache delete "$cache_id"
done
merge-image:
name: Merge & Push Final Docker Image
runs-on: ubuntu-24.04
@@ -205,6 +239,7 @@ jobs:
merge-multiple: true
- name: Set REPO_NAME Variable
shell: bash
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

104
.github/workflows/lint.yml vendored Normal file
View File

@@ -0,0 +1,104 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable lint.yml" -*-
name: Lint & Vulnerability Check
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
cancel-in-progress: true
# Trigger the workflow on push or pull request
on:
push:
branches:
- '**'
tags:
- '**'
pull_request:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
lint:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
steps:
- name: Get runner parameters
id: get-runner-parameters
shell: bash
run: |
echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@v4
- name: Install Go
id: setup-go
uses: actions/setup-go@v5
with:
go-version: '>=1.23.0-rc.1'
check-latest: true
cache: false
- name: Cache
uses: actions/cache@v4
with:
path: |
~/go/pkg/mod
~/.cache/go-build
~/.cache/golangci-lint
key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
- name: Code quality test (Linux)
uses: golangci/golangci-lint-action@v6
with:
version: latest
skip-cache: true
- name: Code quality test (Windows)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "windows"
with:
version: latest
skip-cache: true
- name: Code quality test (macOS)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "darwin"
with:
version: latest
skip-cache: true
- name: Code quality test (FreeBSD)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "freebsd"
with:
version: latest
skip-cache: true
- name: Code quality test (OpenBSD)
uses: golangci/golangci-lint-action@v6
env:
GOOS: "openbsd"
with:
version: latest
skip-cache: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Scan for vulnerabilities
run: govulncheck ./...

View File

@@ -571,8 +571,6 @@ Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
## Keeping a backend or command out of tree
Rclone was designed to be modular so it is very easy to keep a backend

View File

@@ -88,13 +88,13 @@ test: rclone test_all
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
racequicktest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
compiletest:
RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
# Do source code quality checks
check: rclone

View File

@@ -117,16 +117,22 @@ func init() {
} else {
oauthConfig.Scopes = scopesReadWrite
}
return oauthutil.ConfigOut("warning", &oauthutil.Options{
return oauthutil.ConfigOut("warning1", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
case "warning":
case "warning1":
// Warn the user as required by google photos integration
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
return fs.ConfigConfirm("warning2", true, "config_warning", `Warning
IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)
case "warning2":
// Warn the user that rclone can no longer download photos it didn't upload from google photos
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
IMPORTANT: Due to Google policy changes rclone can now only download photos it uploaded.`)
case "warning_done":
return nil, nil
}

View File

@@ -4458,7 +4458,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
}
foundItems += len(resp.Contents)
for i, object := range resp.Contents {
remote := deref(object.Key)
remote := *stringClone(deref(object.Key))
if urlEncodeListings {
remote, err = url.QueryUnescape(remote)
if err != nil {
@@ -5061,8 +5061,11 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
MultipartUpload: &types.CompletedMultipartUpload{
Parts: parts,
},
RequestPayer: req.RequestPayer,
UploadId: uid,
RequestPayer: req.RequestPayer,
SSECustomerAlgorithm: req.SSECustomerAlgorithm,
SSECustomerKey: req.SSECustomerKey,
SSECustomerKeyMD5: req.SSECustomerKeyMD5,
UploadId: uid,
})
return f.shouldRetry(ctx, err)
})
@@ -5911,7 +5914,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
func s3MetadataToMap(s3Meta map[string]string) map[string]string {
meta := make(map[string]string, len(s3Meta))
for k, v := range s3Meta {
meta[strings.ToLower(k)] = v
meta[strings.ToLower(k)] = *stringClone(v)
}
return meta
}
@@ -5954,14 +5957,14 @@ func (o *Object) setMetaData(resp *s3.HeadObjectOutput) {
o.lastModified = *resp.LastModified
}
}
o.mimeType = deref(resp.ContentType)
o.mimeType = strings.Clone(deref(resp.ContentType))
// Set system metadata
o.storageClass = (*string)(&resp.StorageClass)
o.cacheControl = resp.CacheControl
o.contentDisposition = resp.ContentDisposition
o.contentEncoding = resp.ContentEncoding
o.contentLanguage = resp.ContentLanguage
o.storageClass = stringClone(string(resp.StorageClass))
o.cacheControl = stringClonePointer(resp.CacheControl)
o.contentDisposition = stringClonePointer(resp.ContentDisposition)
o.contentEncoding = stringClonePointer(resp.ContentEncoding)
o.contentLanguage = stringClonePointer(resp.ContentLanguage)
// If decompressing then size and md5sum are unknown
if o.fs.opt.Decompress && deref(o.contentEncoding) == "gzip" {
@@ -6446,8 +6449,11 @@ func (w *s3ChunkWriter) Close(ctx context.Context) (err error) {
MultipartUpload: &types.CompletedMultipartUpload{
Parts: w.completedParts,
},
RequestPayer: w.multiPartUploadInput.RequestPayer,
UploadId: w.uploadID,
RequestPayer: w.multiPartUploadInput.RequestPayer,
SSECustomerAlgorithm: w.multiPartUploadInput.SSECustomerAlgorithm,
SSECustomerKey: w.multiPartUploadInput.SSECustomerKey,
SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5,
UploadId: w.uploadID,
})
return w.f.shouldRetry(ctx, err)
})
@@ -6476,8 +6482,8 @@ func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.R
}
var s3cw *s3ChunkWriter = chunkWriter.(*s3ChunkWriter)
gotETag = s3cw.eTag
versionID = aws.String(s3cw.versionID)
gotETag = *stringClone(s3cw.eTag)
versionID = stringClone(s3cw.versionID)
hashOfHashes := md5.Sum(s3cw.md5s)
wantETag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(s3cw.completedParts))
@@ -6509,8 +6515,8 @@ func (o *Object) uploadSinglepartPutObject(ctx context.Context, req *s3.PutObjec
}
lastModified = time.Now()
if resp != nil {
etag = deref(resp.ETag)
versionID = resp.VersionId
etag = *stringClone(deref(resp.ETag))
versionID = stringClonePointer(resp.VersionId)
}
return etag, lastModified, versionID, nil
}
@@ -6562,8 +6568,8 @@ func (o *Object) uploadSinglepartPresignedRequest(ctx context.Context, req *s3.P
if date, err := http.ParseTime(resp.Header.Get("Date")); err != nil {
lastModified = date
}
etag = resp.Header.Get("Etag")
vID := resp.Header.Get("x-amz-version-id")
etag = *stringClone(resp.Header.Get("Etag"))
vID := *stringClone(resp.Header.Get("x-amz-version-id"))
if vID != "" {
versionID = &vID
}

View File

@@ -38,7 +38,7 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
d := &smb2.Dialer{}
if f.opt.UseKerberos {
cl, err := createKerberosClient(f.opt.KerberosCCache)
cl, err := NewKerberosFactory().GetClient(f.opt.KerberosCCache)
if err != nil {
return nil, err
}

View File

@@ -7,17 +7,95 @@ import (
"path/filepath"
"strings"
"sync"
"time"
"github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
)
var (
kerberosClient sync.Map // map[string]*client.Client
kerberosErr sync.Map // map[string]error
)
// KerberosFactory encapsulates dependencies and caches for Kerberos clients.
type KerberosFactory struct {
	// clientCache caches Kerberos clients keyed by resolved ccache path.
	// Clients are reused unless the associated ccache file changes.
	clientCache sync.Map // map[string]*client.Client

	// errCache caches errors encountered when loading Kerberos clients.
	// Prevents repeated attempts for paths that previously failed.
	errCache sync.Map // map[string]error

	// modTimeCache tracks the last known modification time of ccache files.
	// Used to detect changes and trigger credential refresh.
	modTimeCache sync.Map // map[string]time.Time

	// loadCCache loads a credentials cache from a resolved file path.
	// Injectable so tests can substitute a stub instead of a real ccache.
	loadCCache func(string) (*credentials.CCache, error)

	// newClient constructs a Kerberos client from a ccache and config.
	// Injectable so tests can avoid performing real Kerberos operations.
	newClient func(*credentials.CCache, *config.Config, ...func(*client.Settings)) (*client.Client, error)

	// loadConfig loads the krb5 configuration.
	// Injectable so tests can supply a canned config.
	loadConfig func() (*config.Config, error)
}

// NewKerberosFactory creates a new instance of KerberosFactory with default
// dependencies: real ccache loading via credentials.LoadCCache, real client
// construction via client.NewFromCCache, and configuration loaded from the
// default/env krb5.conf path.
func NewKerberosFactory() *KerberosFactory {
	return &KerberosFactory{
		loadCCache: credentials.LoadCCache,
		newClient:  client.NewFromCCache,
		loadConfig: defaultLoadKerberosConfig,
	}
}
// GetClient returns a cached Kerberos client or creates a new one if needed.
//
// A client is cached per resolved ccache path and reused until the ccache
// file's modification time changes, at which point the credentials are
// reloaded. Load failures are cached together with the file's mtime so an
// unchanged, failing ccache is not re-parsed on every call; previously the
// mtime was not recorded on failure, so the error cache never suppressed
// retries for a path that had not succeeded at least once.
func (kf *KerberosFactory) GetClient(ccachePath string) (*client.Client, error) {
	resolvedPath, err := resolveCcachePath(ccachePath)
	if err != nil {
		return nil, err
	}

	stat, err := os.Stat(resolvedPath)
	if err != nil {
		kf.errCache.Store(resolvedPath, err)
		return nil, err
	}
	mtime := stat.ModTime()

	// Serve from cache (client or error) while the file is unchanged.
	if oldMod, ok := kf.modTimeCache.Load(resolvedPath); ok {
		if oldTime, ok := oldMod.(time.Time); ok && oldTime.Equal(mtime) {
			if errVal, ok := kf.errCache.Load(resolvedPath); ok {
				return nil, errVal.(error)
			}
			if clientVal, ok := kf.clientCache.Load(resolvedPath); ok {
				return clientVal.(*client.Client), nil
			}
		}
	}

	// storeErr records a load failure for the current mtime and drops any
	// stale client so the failure is returned until the file changes again.
	storeErr := func(err error) {
		kf.errCache.Store(resolvedPath, err)
		kf.clientCache.Delete(resolvedPath)
		kf.modTimeCache.Store(resolvedPath, mtime)
	}

	// Load Kerberos config
	cfg, err := kf.loadConfig()
	if err != nil {
		storeErr(err)
		return nil, err
	}

	// Load ccache
	ccache, err := kf.loadCCache(resolvedPath)
	if err != nil {
		storeErr(err)
		return nil, err
	}

	// Create new client
	cl, err := kf.newClient(ccache, cfg)
	if err != nil {
		storeErr(err)
		return nil, err
	}

	// Cache and return
	kf.clientCache.Store(resolvedPath, cl)
	kf.errCache.Delete(resolvedPath)
	kf.modTimeCache.Store(resolvedPath, mtime)
	return cl, nil
}
// resolveCcachePath resolves the KRB5 ccache path.
func resolveCcachePath(ccachePath string) (string, error) {
if ccachePath == "" {
ccachePath = os.Getenv("KRB5CCNAME")
@@ -50,45 +128,11 @@ func resolveCcachePath(ccachePath string) (string, error) {
}
}
func loadKerberosConfig() (*config.Config, error) {
// defaultLoadKerberosConfig loads the Kerberos configuration from the path
// named by the KRB5_CONFIG environment variable, falling back to the
// conventional /etc/krb5.conf location when the variable is unset or empty.
func defaultLoadKerberosConfig() (*config.Config, error) {
	cfgPath := "/etc/krb5.conf"
	if env := os.Getenv("KRB5_CONFIG"); env != "" {
		cfgPath = env
	}
	return config.Load(cfgPath)
}
// createKerberosClient creates a new Kerberos client.
func createKerberosClient(ccachePath string) (*client.Client, error) {
ccachePath, err := resolveCcachePath(ccachePath)
if err != nil {
return nil, err
}
// check if we already have a client or an error for this ccache path
if errVal, ok := kerberosErr.Load(ccachePath); ok {
return nil, errVal.(error)
}
if clientVal, ok := kerberosClient.Load(ccachePath); ok {
return clientVal.(*client.Client), nil
}
// create a new client if not found in the map
cfg, err := loadKerberosConfig()
if err != nil {
kerberosErr.Store(ccachePath, err)
return nil, err
}
ccache, err := credentials.LoadCCache(ccachePath)
if err != nil {
kerberosErr.Store(ccachePath, err)
return nil, err
}
cl, err := client.NewFromCCache(ccache, cfg)
if err != nil {
kerberosErr.Store(ccachePath, err)
return nil, err
}
kerberosClient.Store(ccachePath, cl)
return cl, nil
}

View File

@@ -4,7 +4,11 @@ import (
"os"
"path/filepath"
"testing"
"time"
"github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
"github.com/stretchr/testify/assert"
)
@@ -77,3 +81,62 @@ func TestResolveCcachePath(t *testing.T) {
})
}
}
// TestKerberosFactory_GetClient_ReloadOnCcacheChange checks the mtime-based
// refresh logic: the first GetClient call loads the ccache, a second call with
// an unchanged file is served from cache, and a call after the file has been
// rewritten triggers a reload.
func TestKerberosFactory_GetClient_ReloadOnCcacheChange(t *testing.T) {
	// Create temp ccache file
	ccacheFile, err := os.CreateTemp("", "krb5cc_test")
	assert.NoError(t, err)
	defer func() {
		if err := os.Remove(ccacheFile.Name()); err != nil {
			t.Logf("Failed to remove temp file %s: %v", ccacheFile.Name(), err)
		}
	}()

	ccachePath := "FILE:" + filepath.ToSlash(ccacheFile.Name())

	_, err = ccacheFile.Write([]byte("CCACHE_VERSION 4\n"))
	assert.NoError(t, err)
	assert.NoError(t, ccacheFile.Close())

	// Setup mocks: count ccache loads, stub out config and client creation.
	loads := 0
	factory := &KerberosFactory{
		loadCCache: func(string) (*credentials.CCache, error) {
			loads++
			return &credentials.CCache{}, nil
		},
		newClient: func(*credentials.CCache, *config.Config, ...func(*client.Settings)) (*client.Client, error) {
			return &client.Client{}, nil
		},
		loadConfig: func() (*config.Config, error) {
			return &config.Config{}, nil
		},
	}

	// First call — triggers loading
	_, err = factory.GetClient(ccachePath)
	assert.NoError(t, err)
	assert.Equal(t, 1, loads, "expected 1 load call")

	// Second call — should reuse cache, no additional load
	_, err = factory.GetClient(ccachePath)
	assert.NoError(t, err)
	assert.Equal(t, 1, loads, "expected cached reuse, no new load")

	// Simulate file update
	time.Sleep(1 * time.Second) // ensure mtime changes
	err = os.WriteFile(ccacheFile.Name(), []byte("CCACHE_VERSION 4\n#updated"), 0600)
	assert.NoError(t, err)

	// Third call — should detect change, reload
	_, err = factory.GetClient(ccachePath)
	assert.NoError(t, err)
	assert.Equal(t, 2, loads, "expected reload on changed ccache")
}

View File

@@ -4,12 +4,12 @@ This script checks for unauthorized modifications in autogenerated sections of m
It is designed to be used in a GitHub Actions workflow or a local pre-commit hook.
Features:
- Detects markdown files changed in the last commit.
- Detects markdown files changed between a commit and one of its ancestors. Default is to
check the last commit only. When triggered on a pull request it should typically compare the
pull request branch head and its merge base - the commit on the main branch before it diverged.
- Identifies modified autogenerated sections marked by specific comments.
- Reports violations using GitHub Actions error messages.
- Exits with a nonzero status code if unauthorized changes are found.
It currently only checks the last commit.
"""
import re
@@ -22,18 +22,18 @@ def run_git(args):
"""
return subprocess.run(["git"] + args, stdout=subprocess.PIPE, text=True, check=True).stdout.strip()
def get_changed_files():
def get_changed_files(base, head):
"""
Retrieve a list of markdown files that were changed in the last commit.
Retrieve a list of markdown files that were changed between the base and head commits.
"""
files = run_git(["diff", "--name-only", "HEAD~1", "HEAD"]).splitlines()
files = run_git(["diff", "--name-only", f"{base}...{head}"]).splitlines()
return [f for f in files if f.endswith(".md")]
def get_diff(file):
def get_diff(file, base, head):
"""
Get the diff of a given file between the last commit and the current version.
Get the diff of a given file between the base and head commits.
"""
return run_git(["diff", "-U0", "HEAD~1", "HEAD", "--", file]).splitlines()
return run_git(["diff", "-U0", f"{base}...{head}", "--", file]).splitlines()
def get_file_content(ref, file):
"""
@@ -70,7 +70,7 @@ def show_error(file_name, line, message):
"""
print(f"::error file={file_name},line={line}::{message} at {file_name} line {line}")
def check_file(file):
def check_file(file, base, head):
"""
Check a markdown file for modifications in autogenerated regions.
"""
@@ -84,7 +84,7 @@ def check_file(file):
# Entire autogenerated file check.
if any("autogenerated - DO NOT EDIT" in l for l in new_lines[:10]):
if get_diff(file):
if get_diff(file, base, head):
show_error(file, 1, "Autogenerated file modified")
return True
return False
@@ -92,7 +92,7 @@ def check_file(file):
# Partial autogenerated regions.
regions_new = find_regions(new_lines)
regions_old = find_regions(old_lines)
diff = get_diff(file)
diff = get_diff(file, base, head)
hunk_re = re.compile(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@")
new_ln = old_ln = None
@@ -124,9 +124,15 @@ def main():
"""
Main function that iterates over changed files and checks them for violations.
"""
base = "HEAD~1"
head = "HEAD"
if len(sys.argv) > 1:
base = sys.argv[1]
if len(sys.argv) > 2:
head = sys.argv[2]
found = False
for f in get_changed_files():
if check_file(f):
for f in get_changed_files(base, head):
if check_file(f, base, head):
found = True
if found:
sys.exit(1)

3
bin/go-test-cache/go.mod Normal file
View File

@@ -0,0 +1,3 @@
module go-test-cache
go 1.24

123
bin/go-test-cache/main.go Normal file
View File

@@ -0,0 +1,123 @@
// This code was copied from:
// https://github.com/fastly/cli/blob/main/scripts/go-test-cache/main.go
// which in turn is based on the following script and was generated using AI.
// https://github.com/airplanedev/blog-examples/blob/main/go-test-caching/update_file_timestamps.py?ref=airplane.ghost.io
//
// REFERENCE ARTICLE:
// https://web.archive.org/web/20240308061717/https://www.airplane.dev/blog/caching-golang-tests-in-ci
//
// It updates the mtime of the files to a mtime derived from the sha1 hash of their contents.
package main
import (
"crypto/sha1"
"io"
"log"
"os"
"path/filepath"
"sort"
"strings"
"time"
)
const (
	// bufSize is the copy-buffer size (64 KiB) used when hashing file contents.
	bufSize = 65536
	// baseDate is an arbitrary fixed Unix timestamp that all derived
	// modification times are offset from, so results are stable across
	// runs and machines.
	baseDate = 1684178360
	// timeFormat is the layout used when logging the timestamps being set.
	timeFormat = "2006-01-02 15:04:05"
)
// main walks the current directory tree and rewrites modification times to
// values derived purely from file contents, so Go's build/test cache keys
// stay stable across fresh checkouts in CI.
//
// Files get an mtime derived from the SHA-1 of their contents (set during the
// walk); directories are then all set to the fixed baseDate, processed
// deepest-first so touching a child cannot disturb an already-set parent.
// Hidden paths (leading ".", e.g. .git) are left untouched.
func main() {
	repoRoot := "."
	allDirs := make([]string, 0)
	err := filepath.Walk(repoRoot, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			// NOTE(review): joining repoRoot with path is redundant —
			// Walk already returns paths rooted at repoRoot. Harmless
			// while repoRoot is ".".
			dirPath := filepath.Join(repoRoot, path)
			relPath, _ := filepath.Rel(repoRoot, dirPath)
			// Skip hidden directories. This returns nil (not
			// filepath.SkipDir), so the walk still descends into them;
			// their contents are skipped individually below.
			if strings.HasPrefix(relPath, ".") {
				return nil
			}
			// Collect directories now; their times are set after the
			// walk so file updates inside cannot bump them afterwards.
			allDirs = append(allDirs, dirPath)
		} else {
			filePath := filepath.Join(repoRoot, path)
			relPath, _ := filepath.Rel(repoRoot, filePath)
			if strings.HasPrefix(relPath, ".") {
				return nil
			}
			// Derive a deterministic mtime from the file's content hash.
			sha1Hash, err := getFileSHA1(filePath)
			if err != nil {
				return err
			}
			modTime := getModifiedTime(sha1Hash)
			log.Printf("Setting modified time of file %s to %s\n", relPath, modTime.Format(timeFormat))
			err = os.Chtimes(filePath, modTime, modTime)
			if err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal("Error:", err)
	}
	// Longest (deepest) paths first, ties broken lexicographically, so every
	// child directory is stamped before its parent.
	sort.Slice(allDirs, func(i, j int) bool {
		return len(allDirs[i]) > len(allDirs[j]) || (len(allDirs[i]) == len(allDirs[j]) && allDirs[i] < allDirs[j])
	})
	for _, dirPath := range allDirs {
		relPath, _ := filepath.Rel(repoRoot, dirPath)
		log.Printf("Setting modified time of directory %s to %s\n", relPath, time.Unix(baseDate, 0).Format(timeFormat))
		err := os.Chtimes(dirPath, time.Unix(baseDate, 0), time.Unix(baseDate, 0))
		if err != nil {
			log.Fatal("Error:", err)
		}
	}
	log.Println("Done")
}
// getFileSHA1 returns the raw (binary, not hex-encoded) SHA-1 digest of the
// file at filePath, packed into a string. Callers treat the result as an
// opaque content fingerprint, not as displayable text.
func getFileSHA1(filePath string) (string, error) {
	f, err := os.Open(filePath)
	if err != nil {
		return "", err
	}
	defer f.Close()

	// SHA-1 is deliberate here: the digest is only a cache key used to
	// derive stable timestamps, never a security control.
	// #nosec
	// nosemgrep: go.lang.security.audit.crypto.use_of_weak_crypto.use-of-sha1
	digest := sha1.New()
	buf := make([]byte, bufSize)
	if _, err := io.CopyBuffer(digest, f, buf); err != nil {
		return "", err
	}
	return string(digest.Sum(nil)), nil
}
// getModifiedTime maps a raw SHA-1 digest (as produced by getFileSHA1) to a
// deterministic time within the 10000-second window ending at baseDate.
//
// Only the first five bytes of the digest are used (the original local was
// misleadingly named "lastFiveBytes" despite slicing the prefix). Five bytes
// keep the folded value (max 2^40-1) comfortably inside int64 before the
// modulo. Inputs shorter than five bytes are clamped instead of panicking;
// real SHA-1 digests are always 20 bytes, so results are unchanged for them.
func getModifiedTime(sha1Hash string) time.Time {
	hashBytes := []byte(sha1Hash)
	n := len(hashBytes)
	if n > 5 {
		n = 5
	}
	// Fold the prefix bytes big-endian into a single integer.
	firstFiveValue := int64(0)
	for _, b := range hashBytes[:n] {
		firstFiveValue = (firstFiveValue << 8) + int64(b)
	}
	// Offset backwards from baseDate so derived times never exceed it.
	modTime := baseDate - (firstFiveValue % 10000)
	return time.Unix(modTime, 0)
}

View File

@@ -5,6 +5,8 @@ import (
"context"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -35,6 +37,11 @@ implement this command directly, in which case ` + "`--checkers`" + ` will be ig
cmd.CheckArgs(1, 1, command, args)
fdst := cmd.NewFsDir(args)
cmd.Run(true, false, command, func() error {
ctx := context.Background()
fi := filter.GetConfig(ctx)
if !fi.InActive() {
fs.Fatalf(nil, "filters are not supported with purge (purge will delete everything unconditionally)")
}
return operations.Purge(context.Background(), fdst, "")
})
},

View File

@@ -422,7 +422,7 @@ put them back in again.` >}}
* Dov Murik <dov.murik@gmail.com>
* Ameer Dawood <ameer1234567890@gmail.com>
* Dan Hipschman <dan.hipschman@opendoor.com>
* Josh Soref <jsoref@users.noreply.github.com>
* Josh Soref <jsoref@users.noreply.github.com> <2119212+jsoref@users.noreply.github.com>
* David <david@staron.nl>
* Ingo <ingo@hoffmann.cx>
* Adam Plánský <adamplansky@users.noreply.github.com> <adamplansky@gmail.com>
@@ -637,7 +637,7 @@ put them back in again.` >}}
* anonion <aman207@users.noreply.github.com>
* Ryan Morey <4590343+rmorey@users.noreply.github.com>
* Simon Bos <simonbos9@gmail.com>
* YFdyh000 <yfdyh000@gmail.com> * Josh Soref <2119212+jsoref@users.noreply.github.com>
* YFdyh000 <yfdyh000@gmail.com>
* Øyvind Heddeland Instefjord <instefjord@outlook.com>
* Dmitry Deniskin <110819396+ddeniskin@users.noreply.github.com>
* Alexander Knorr <106825+opexxx@users.noreply.github.com>
@@ -991,3 +991,4 @@ put them back in again.` >}}
* Ross Smith II <ross@smithii.com>
* Vikas Bhansali <64532198+vibhansa-msft@users.noreply.github.com>
* Sudipto Baral <sudiptobaral.me@gmail.com>
* Sam Pegg <samrpegg@gmail.com>

View File

@@ -377,8 +377,6 @@ will show you the defaults for the backends.
| Exclamation | `!` | `` |
| Hash | `#` | `` |
| InvalidUtf8 | An invalid UTF-8 character (e.g. latin1) | `<60>` |
| ForceNFC | All invalid NFC characters | Their valid NFC equivalents |
| ForceNFD | All invalid NFD characters | Their valid NFD equivalents |
| LeftCrLfHtVt | CR 0x0D, LF 0x0A, HT 0x09, VT 0x0B on the left of a string | `␍`, `␊`, `␉`, `␋` |
| LeftPeriod | `.` on the left of a string | `.` |
| LeftSpace | SPACE on the left of a string | `␠` |

View File

@@ -56,6 +56,7 @@ off donation.
Thank you very much to our sponsors:
{{< sponsor src="/img/logos/backblaze.svg" width="300" height="200" title="Visit our sponsor Backblaze" link="https://www.backblaze.com/cloud-storage-rclonead?utm_source=rclone&utm_medium=paid&utm_campaign=rclone-website-20250715">}}
{{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}}
{{< sponsor src="/img/logos/filescom-enterprise-grade-workflows.png" width="300" height="200" title="Start Your Free Trial Today" link="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone">}}
{{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}

View File

@@ -10,6 +10,15 @@
</div>
{{end}}
<div class="card">
<div class="card-header" style="padding: 5px 15px;">
Platinum Sponsor
</div>
<div class="card-body">
<a href="https://www.backblaze.com/cloud-storage-rclonead?utm_source=rclone&utm_medium=paid&utm_campaign=rclone-website-20250715" target="_blank" rel="noopener" title="Visit rclone's sponsor Backblaze"><img src="/img/logos/backblaze.svg"></a><br />
</div>
</div>
<div class="card">
<div class="card-header" style="padding: 5px 15px;">
Gold Sponsor

View File

@@ -67,13 +67,13 @@ backends:
# maxfile: 10k
# ignore:
# - TestApplyTransforms
- backend: "chunker"
remote: "TestChunkerChunk50bYandex:"
fastlist: true
maxfile: 1k
ignore:
# Needs investigation
- TestDeduplicateNewestByHash
# - backend: "chunker"
# remote: "TestChunkerChunk50bYandex:"
# fastlist: true
# maxfile: 1k
# ignore:
# # Needs investigation
# - TestDeduplicateNewestByHash
# - backend: "chunker"
# remote: "TestChunkerChunk50bBox:"
# fastlist: true

View File

@@ -18,8 +18,6 @@ import (
"strconv"
"strings"
"unicode/utf8"
"golang.org/x/text/unicode/norm"
)
const (
@@ -63,8 +61,6 @@ const (
EncodeRightPeriod // Trailing .
EncodeRightCrLfHtVt // Trailing CR LF HT VT
EncodeInvalidUtf8 // Invalid UTF-8 bytes
EncodeInvalidNFC // Force NFC encoding
EncodeInvalidNFD // Force NFD encoding
EncodeDot // . and .. names
EncodeSquareBracket // []
EncodeSemicolon // ;
@@ -152,8 +148,6 @@ func init() {
alias("RightPeriod", EncodeRightPeriod)
alias("RightCrLfHtVt", EncodeRightCrLfHtVt)
alias("InvalidUtf8", EncodeInvalidUtf8)
alias("ForceNFC", EncodeInvalidNFC)
alias("ForceNFD", EncodeInvalidNFD)
alias("Dot", EncodeDot)
}
@@ -232,13 +226,6 @@ func (mask MultiEncoder) Encode(in string) string {
return ""
}
if mask.Has(EncodeInvalidNFD) {
in = norm.NFD.String(in)
}
if mask.Has(EncodeInvalidNFC) {
in = norm.NFC.String(in)
}
if mask.Has(EncodeDot) {
switch in {
case ".":
@@ -701,15 +688,6 @@ func (mask MultiEncoder) Decode(in string) string {
return in
}
/* // Can't losslessly decode NFC/NFD
if mask.Has(EncodeInvalidNFD) {
in = norm.NFC.String(in)
}
if mask.Has(EncodeInvalidNFC) {
in = norm.NFD.String(in)
}
*/
if mask.Has(EncodeDot) {
switch in {
case "":

View File

@@ -34,6 +34,7 @@ func TestEncodeString(t *testing.T) {
got := test.mask.String()
assert.Equal(t, test.want, got)
}
}
func TestEncodeSet(t *testing.T) {
@@ -59,6 +60,7 @@ func TestEncodeSet(t *testing.T) {
assert.Equal(t, test.wantErr, err != nil, err)
assert.Equal(t, test.want, got, test.in)
}
}
type testCase struct {
@@ -173,34 +175,6 @@ func TestEncodeInvalidUnicode(t *testing.T) {
}
}
func TestEncodeNFCNFD(t *testing.T) {
for i, tc := range []testCase{
{
mask: EncodeInvalidNFC,
in: "Über",
out: "Über",
},
{
mask: EncodeInvalidNFD,
in: "Über",
out: "Über",
},
} {
e := tc.mask
t.Run(strconv.FormatInt(int64(i), 10), func(t *testing.T) {
got := e.Encode(tc.in)
if got != tc.out {
t.Errorf("Encode(%q) want %q got %q", tc.in, tc.out, got)
}
// we can't losslessly decode NFC/NFD
/* got2 := e.Decode(got)
if got2 != tc.in {
t.Errorf("Decode(%q) want %q got %q", got, tc.in, got2)
} */
})
}
}
func TestEncodeDot(t *testing.T) {
for i, tc := range []testCase{
{

View File

@@ -67,8 +67,6 @@ var maskBits = []struct {
{encoder.EncodeRightPeriod, "EncodeRightPeriod"},
{encoder.EncodeRightCrLfHtVt, "EncodeRightCrLfHtVt"},
{encoder.EncodeInvalidUtf8, "EncodeInvalidUtf8"},
{encoder.EncodeInvalidNFC, "ForceNFC"},
{encoder.EncodeInvalidNFD, "ForceNFD"},
{encoder.EncodeDot, "EncodeDot"},
}
@@ -84,15 +82,13 @@ var allEdges = []edge{
{encoder.EncodeLeftSpace, "EncodeLeftSpace", edgeLeft, []rune{' '}, []rune{'␠'}},
{encoder.EncodeLeftPeriod, "EncodeLeftPeriod", edgeLeft, []rune{'.'}, []rune{''}},
{encoder.EncodeLeftTilde, "EncodeLeftTilde", edgeLeft, []rune{'~'}, []rune{''}},
{
encoder.EncodeLeftCrLfHtVt, "EncodeLeftCrLfHtVt", edgeLeft,
{encoder.EncodeLeftCrLfHtVt, "EncodeLeftCrLfHtVt", edgeLeft,
[]rune{'\t', '\n', '\v', '\r'},
[]rune{'␀' + '\t', '␀' + '\n', '␀' + '\v', '␀' + '\r'},
},
{encoder.EncodeRightSpace, "EncodeRightSpace", edgeRight, []rune{' '}, []rune{'␠'}},
{encoder.EncodeRightPeriod, "EncodeRightPeriod", edgeRight, []rune{'.'}, []rune{''}},
{
encoder.EncodeRightCrLfHtVt, "EncodeRightCrLfHtVt", edgeRight,
{encoder.EncodeRightCrLfHtVt, "EncodeRightCrLfHtVt", edgeRight,
[]rune{'\t', '\n', '\v', '\r'},
[]rune{'␀' + '\t', '␀' + '\n', '␀' + '\v', '␀' + '\r'},
},
@@ -103,122 +99,102 @@ var allMappings = []mapping{{
0,
}, []rune{
'␀',
},
}, {
}}, {
encoder.EncodeSlash, []rune{
'/',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodeLtGt, []rune{
'<', '>',
}, []rune{
'', '',
},
}, {
}}, {
encoder.EncodeSquareBracket, []rune{
'[', ']',
}, []rune{
'', '',
},
}, {
}}, {
encoder.EncodeSemicolon, []rune{
';',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodeExclamation, []rune{
'!',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodeDoubleQuote, []rune{
'"',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodeSingleQuote, []rune{
'\'',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodeBackQuote, []rune{
'`',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodeDollar, []rune{
'$',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodeColon, []rune{
':',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodeQuestion, []rune{
'?',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodeAsterisk, []rune{
'*',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodePipe, []rune{
'|',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodeHash, []rune{
'#',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodePercent, []rune{
'%',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodeSlash, []rune{
'/',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodeBackSlash, []rune{
'\\',
}, []rune{
'',
},
}, {
}}, {
encoder.EncodeCrLf, []rune{
rune(0x0D), rune(0x0A),
}, []rune{
'␍', '␊',
},
}, {
}}, {
encoder.EncodeDel, []rune{
0x7F,
}, []rune{
'␡',
},
}, {
}}, {
encoder.EncodeCtl,
runeRange(0x01, 0x1F),
runeRange('␁', '␟'),
@@ -462,7 +438,6 @@ func fatal(err error, s ...any) {
fs.Fatal(nil, fmt.Sprint(append(s, err)))
}
}
func fatalW(_ int, err error) func(...any) {
if err != nil {
return func(s ...any) {
@@ -496,14 +471,12 @@ func getMapping(mask encoder.MultiEncoder) mapping {
}
return mapping{}
}
func collectEncodables(m []mapping) (out []rune) {
for _, s := range m {
out = append(out, s.src...)
}
return
}
func collectEncoded(m []mapping) (out []rune) {
for _, s := range m {
out = append(out, s.dst...)