Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: fix-7831-w ... feat/cache (52 commits)
Commit SHAs:
2804f5068a, d1ac6c2fe1, da9c99272c, 9c7594d78f, 70226cc653, c20e4bd99c,
ccfe153e9b, c9730bcaaf, 03dd7486c1, 6249009fdf, 8e2d76459f, 5e539c6a72,
8866112400, bfdd5e2c22, f3f16cd2b9, d84ea2ec52, b259241c07, a8ab0730a7,
cef207cf94, e728ea32d1, ccdee0420f, 8a51e11d23, 9083f1ff15, 2964b1a169,
b6767820de, 821e7fce45, b7c6268d3e, 521d6b88d4, cf767b0856, 25f7809822,
74c0b1ea3b, f4dcb1e9cf, 90f1d023ff, e9c5f2d4e8, 1249e9b5ac, d47bc5f6c4,
efb1794135, 71b98a03a9, 8e625c6593, 6b2cd7c631, aa4aead63c, c491d12cd0,
9e4d703a56, fc0c0a7771, d5cc0d83b0, 52762dc866, 3c092cfc17, 7f3f1af541,
f885c481f0, 865d4b2bda, 3cb1e65eb6, f667346718
.github/workflows/build.yml (vendored, 2 lines changed)

@@ -283,7 +283,7 @@ jobs:
         run: govulncheck ./...

       - name: Scan edits of autogenerated files
-        run: bin/check_autogenerated_edits.py
+        run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
         if: github.event_name == 'pull_request'

   android:
.github/workflows/build_android.yml (vendored, new file, 212 lines)

@@ -0,0 +1,212 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build_android.yml" -*-

name: Build & Push Android Builds

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
  cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  pull_request:
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  android:
    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        include:
          - job_name: android-all
            platform: linux/amd64/android/go1.24
            os: ubuntu-latest
            go: '>=1.24.0-rc.1'

    name: ${{ matrix.job_name }}
    runs-on: ${{ matrix.os }}

    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      # Upgrade together with NDK version
      - name: Install Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
          check-latest: true
          cache: false

      - name: Set Environment Variables
        shell: bash
        run: |
          echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_ENV
          echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_ENV
          echo "VERSION=$(make version)" >> $GITHUB_ENV

      - name: Set PLATFORM Variable
        shell: bash
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

      - name: Get ImageOS
        # There's no way around this, because "ImageOS" is only available to
        # processes, but the setup-go action uses it in its key.
        id: imageos
        uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
            return process.env.ImageOS

      - name: Set CACHE_PREFIX Variable
        shell: bash
        run: |
          cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}
          echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV

      - name: Load Go Module Cache
        uses: actions/cache@v4
        with:
          path: |
            ${{ env.GOMODCACHE }}
          key: ${{ env.CACHE_PREFIX }}-modcache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-modcache

      # Both load & update the cache when on default branch
      - name: Load Go Build & Test Cache
        id: go-cache
        uses: actions/cache@v4
        if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request'
        with:
          path: |
            ${{ env.GOCACHE }}
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache

      # Only load the cache when not on default branch
      - name: Load Go Build & Test Cache
        id: go-cache-restore
        uses: actions/cache/restore@v4
        if: github.ref_name != github.event.repository.default_branch || github.event_name == 'pull_request'
        with:
          path: |
            ${{ env.GOCACHE }}
          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
          restore-keys: |
            ${{ env.CACHE_PREFIX }}-cache

      - name: Build Native rclone
        shell: bash
        run: |
          make

      - name: Install gomobile
        shell: bash
        run: |
          go install golang.org/x/mobile/cmd/gobind@latest
          go install golang.org/x/mobile/cmd/gomobile@latest
          env PATH=$PATH:~/go/bin gomobile init
          echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV

      - name: arm-v7a - gomobile build
        shell: bash
        run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile

      - name: arm-v7a - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm' >> $GITHUB_ENV
          echo 'GOARM=7' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: arm-v7a - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .

      - name: arm64-v8a - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=arm64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: arm64-v8a - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .

      - name: x86 - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=386' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x86 - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .

      - name: x64 - Set Environment Variables
        shell: bash
        run: |
          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=amd64' >> $GITHUB_ENV
          echo 'CGO_ENABLED=1' >> $GITHUB_ENV
          echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

      - name: x64 - Build
        shell: bash
        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .

      - name: Delete Existing Cache
        continue-on-error: true
        shell: bash
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
          for cache_id in "${cache_ids[@]}"; do
            echo "Deleting Cache: $cache_id"
            gh cache delete "$cache_id"
          done
        if: github.ref_name == github.event.repository.default_branch && github.event_name != 'pull_request' && steps.go-cache.outputs.cache-hit != 'true'

      - name: Deploy Built Binaries
        shell: bash
        run: |
          make ci_upload
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # Upload artifacts if not a PR && not a fork
        if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
.github/workflows/build_publish_docker_image.yml (vendored, 75 lines changed)

@@ -4,6 +4,10 @@

 name: Build & Push Docker Images

+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
+  cancel-in-progress: true
+
 # Trigger the workflow on push or pull request
 on:
   push:

@@ -41,32 +45,26 @@ jobs:
     runs-on: ${{ matrix.runs-on }}

     steps:
-      - name: Free Space
-        shell: bash
-        run: |
-          df -h .
-          # Remove android SDK
-          sudo rm -rf /usr/local/lib/android || true
-          # Remove .net runtime
-          sudo rm -rf /usr/share/dotnet || true
-          df -h .
-
       - name: Checkout Repository
         uses: actions/checkout@v4
         with:
           fetch-depth: 0

       - name: Set REPO_NAME Variable
         shell: bash
         run: |
           echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

       - name: Set PLATFORM Variable
         shell: bash
         run: |
           platform=${{ matrix.platform }}
           echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

       - name: Set CACHE_NAME Variable
         shell: python
+        env:
+          GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
         run: |
           import os, re

@@ -82,8 +80,11 @@ jobs:

           ref_name_slug = "cache"

-          if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
-              ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
+          if os.environ.get("GITHUB_REF_NAME"):
+              if os.environ['GITHUB_EVENT_NAME'] == "pull_request":
+                  ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
+              elif os.environ['GITHUB_REF_NAME'] != os.environ['GITHUB_EVENT_REPOSITORY_DEFAULT_BRANCH']:
+                  ref_name_slug += "-ref-" + slugify(os.environ['GITHUB_REF_NAME'])

           with open(os.environ['GITHUB_ENV'], 'a') as env:
               env.write(f"CACHE_NAME={ref_name_slug}\n")

@@ -98,6 +99,12 @@ jobs:
           script: |
             return process.env.ImageOS

+      - name: Set CACHE_PREFIX Variable
+        shell: bash
+        run: |
+          cache_prefix=${{ runner.os }}-${{ steps.imageos.outputs.result }}-${{ env.PLATFORM }}-docker-go
+          echo "CACHE_PREFIX=${cache_prefix}" >> $GITHUB_ENV
+
       - name: Extract Metadata (tags, labels) for Docker
         id: meta
         uses: docker/metadata-action@v5

@@ -130,22 +137,35 @@ jobs:
       - name: Load Go Build Cache for Docker
         id: go-cache
         uses: actions/cache@v4
+        if: github.ref_name == github.event.repository.default_branch
         with:
-          key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
           # Cache only the go builds, the module download is cached via the docker layer caching
           path: |
-            go-build-cache
+            /tmp/go-build-cache
+          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
+          restore-keys: |
+            ${{ env.CACHE_PREFIX }}-cache
+
+      - name: Load Go Build Cache for Docker
+        id: go-cache-restore
+        uses: actions/cache/restore@v4
+        if: github.ref_name != github.event.repository.default_branch
+        with:
+          # Cache only the go builds, the module download is cached via the docker layer caching
+          path: |
+            /tmp/go-build-cache
+          key: ${{ env.CACHE_PREFIX }}-cache-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
+          restore-keys: |
+            ${{ env.CACHE_PREFIX }}-cache

       - name: Inject Go Build Cache into Docker
         uses: reproducible-containers/buildkit-cache-dance@v3
         with:
           cache-map: |
             {
-              "go-build-cache": "/root/.cache/go-build"
+              "/tmp/go-build-cache": "/root/.cache/go-build"
             }
-          skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}
+          skip-extraction: ${{ steps.go-cache.outputs.cache-hit || steps.go-cache-restore.outputs.cache-hit }}

       - name: Login to GitHub Container Registry
         uses: docker/login-action@v3

@@ -172,9 +192,10 @@ jobs:
           outputs: |
             type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
           cache-from: |
-            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
+            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }}
+            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-cache
           cache-to: |
-            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd
+            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.PLATFORM }}-${{ env.CACHE_NAME }},image-manifest=true,mode=max,compression=zstd

       - name: Export Image Digest
         run: |

@@ -190,6 +211,19 @@ jobs:
           retention-days: 1
           if-no-files-found: error

+      - name: Delete Existing Cache
+        if: github.ref_name == github.event.repository.default_branch && steps.go-cache.outputs.cache-hit != 'true'
+        continue-on-error: true
+        shell: bash
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          cache_ids=($(gh cache list --key "${{ env.CACHE_PREFIX }}-cache" --json id | jq '.[].id'))
+          for cache_id in "${cache_ids[@]}"; do
+            echo "Deleting Cache: $cache_id"
+            gh cache delete "$cache_id"
+          done
+
   merge-image:
     name: Merge & Push Final Docker Image
     runs-on: ubuntu-24.04

@@ -205,6 +239,7 @@ jobs:
           merge-multiple: true

       - name: Set REPO_NAME Variable
         shell: bash
         run: |
           echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
.github/workflows/lint.yml (vendored, new file, 104 lines)

@@ -0,0 +1,104 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable lint.yml" -*-

name: Lint & Vulnerability Check

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }}
  cancel-in-progress: true

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  pull_request:
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  lint:
    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
    timeout-minutes: 30
    name: "lint"
    runs-on: ubuntu-latest

    steps:
      - name: Get runner parameters
        id: get-runner-parameters
        shell: bash
        run: |
          echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT

      - name: Checkout
        uses: actions/checkout@v4

      - name: Install Go
        id: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: '>=1.23.0-rc.1'
          check-latest: true
          cache: false

      - name: Cache
        uses: actions/cache@v4
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
            ~/.cache/golangci-lint
          key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }}
          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

      - name: Code quality test (Linux)
        uses: golangci/golangci-lint-action@v6
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (Windows)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "windows"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (macOS)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "darwin"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (FreeBSD)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "freebsd"
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (OpenBSD)
        uses: golangci/golangci-lint-action@v6
        env:
          GOOS: "openbsd"
        with:
          version: latest
          skip-cache: true

      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest

      - name: Scan for vulnerabilities
        run: govulncheck ./...
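The lint job is repeated once per GOOS because golangci-lint only analyzes files that build for the target platform; files guarded by a build constraint are invisible to a single-GOOS run. A minimal sketch of the kind of file this matters for (package and function names here are illustrative, not from the rclone tree):

//go:build windows

// Package example is a hypothetical illustration: with GOOS=windows set for
// golangci-lint, this file joins the analyzed package; under the default
// GOOS it is excluded by its build constraint, so a Linux-only lint pass
// could never flag problems in it.
package example

import "golang.org/x/sys/windows"

// isElevated calls a Windows-only API, so it can only be vetted when the
// linter runs with GOOS=windows.
func isElevated() bool {
	var token windows.Token
	return token.IsElevated()
}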
@@ -571,8 +571,6 @@ Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.

[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)

[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)

## Keeping a backend or command out of tree

Rclone was designed to be modular so it is very easy to keep a backend
Makefile (6 lines changed)

@@ -88,13 +88,13 @@ test: rclone test_all

 # Quick test
 quicktest:
-	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) ./...
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...

 racequicktest:
-	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -cpu=2 -race ./...
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...

 compiletest:
-	RCLONE_CONFIG="/notfound" go test $(LDFLAGS) $(BUILDTAGS) -run XXX ./...
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...

 # Do source code quality checks
 check: rclone
@@ -52,7 +52,7 @@ func (f *Fs) testUploadTimeout(t *testing.T) {
 		ci.Timeout = saveTimeout
 	}()
 	ci.LowLevelRetries = 1
-	ci.Timeout = idleTimeout
+	ci.Timeout = fs.Duration(idleTimeout)

 	upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
 		fixFs := deriveFs(ctx, t, f, settings{
@@ -117,16 +117,22 @@ func init() {
 		} else {
 			oauthConfig.Scopes = scopesReadWrite
 		}
-		return oauthutil.ConfigOut("warning", &oauthutil.Options{
+		return oauthutil.ConfigOut("warning1", &oauthutil.Options{
 			OAuth2Config: oauthConfig,
 		})
-	case "warning":
+	case "warning1":
 		// Warn the user as required by google photos integration
-		return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
+		return fs.ConfigConfirm("warning2", true, "config_warning", `Warning

 IMPORTANT: All media items uploaded to Google Photos with rclone
 are stored in full resolution at original quality. These uploads
 will count towards storage in your Google Account.`)

+	case "warning2":
+		// Warn the user that rclone can no longer download photos it didnt upload from google photos
+		return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
+IMPORTANT: Due to Google policy changes rclone can now only download photos it uploaded.`)
+
 	case "warning_done":
 		return nil, nil
 	}
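These hunks extend rclone's config state machine: each ConfigOut/ConfigConfirm call names the state to run next, so inserting a second warning is just renaming states and adding one case. A condensed sketch of the resulting flow (warning texts abbreviated):

// state "" → "warning1" → "warning2" → "warning_done" → done
switch config.State {
case "":
	return oauthutil.ConfigOut("warning1", &oauthutil.Options{OAuth2Config: oauthConfig})
case "warning1":
	return fs.ConfigConfirm("warning2", true, "config_warning", "…storage warning…")
case "warning2":
	return fs.ConfigConfirm("warning_done", true, "config_warning", "…download-policy warning…")
case "warning_done":
	return nil, nil
}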
@@ -421,6 +421,9 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // will return the object and the error, otherwise will return
 // nil and the error
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	if src.Size() == 0 {
+		return nil, fs.ErrorCantUploadEmptyFiles
+	}
 	return uploadFile(ctx, f, in, src.Remote(), options...)
 }

@@ -659,6 +662,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
 // return an error or update the object properly (rather than e.g. calling panic).
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+	if src.Size() == 0 {
+		return fs.ErrorCantUploadEmptyFiles
+	}

 	srcRemote := o.Remote()

@@ -670,7 +676,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	var resp *client.UploadResult

-	err = o.fs.pacer.Call(func() (bool, error) {
+	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 		var res *http.Response
 		res, resp, err = o.fs.ik.Upload(ctx, in, client.UploadParam{
 			FileName: fileName,

@@ -725,7 +731,7 @@ func uploadFile(ctx context.Context, f *Fs, in io.Reader, srcRemote string, opti
 	UseUniqueFileName := new(bool)
 	*UseUniqueFileName = false

-	err := f.pacer.Call(func() (bool, error) {
+	err := f.pacer.CallNoRetry(func() (bool, error) {
 		var res *http.Response
 		var err error
 		res, _, err = f.ik.Upload(ctx, in, client.UploadParam{

@@ -794,35 +800,10 @@ func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error)
 	return metadata, nil
 }

-// Copy src to this remote using server-side move operations.
-//
-// This is stored with the remote path given.
-//
-// It returns the destination Object and a possible error.
-//
-// Will only be called if src.Fs().Name() == f.Name()
-//
-// If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-	srcObj, ok := src.(*Object)
-	if !ok {
-		return nil, fs.ErrorCantMove
-	}
-
-	file, err := srcObj.Open(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	return uploadFile(ctx, f, file, remote)
-}
-
 // Check the interfaces are satisfied.
 var (
 	_ fs.Fs           = &Fs{}
 	_ fs.Purger       = &Fs{}
 	_ fs.PublicLinker = &Fs{}
 	_ fs.Object       = &Object{}
-	_ fs.Copier       = &Fs{}
 )
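For context on the pacer change, as I understand rclone's lib/pacer: Call re-invokes its callback while the callback reports a retryable failure, whereas CallNoRetry applies the same rate limiting but invokes the callback once. An upload callback consumes the io.Reader it is given, so a transparent retry would re-send a half-read stream. A schematic sketch of the difference (a toy model, not rclone's actual implementation):

// call models a pacer invocation: fn reports (retry, err).
// With noRetry set, pacing still applies but fn runs at most once —
// important when fn consumes a one-shot input such as an upload body.
func call(fn func() (bool, error), maxTries int, noRetry bool) error {
	for try := 1; ; try++ {
		retry, err := fn() // may consume the input reader
		if !retry || noRetry || try >= maxTries {
			return err
		}
	}
}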
@@ -2635,7 +2635,7 @@ The parameter should be a date, "2006-01-02", datetime "2006-01-02
 Note that when using this no file write operations are permitted,
 so you can't upload files or delete them.

-See [the time option docs](/docs/#time-option) for valid formats.
+See [the time option docs](/docs/#time-options) for valid formats.
 `,
 	Default:  fs.Time{},
 	Advanced: true,

@@ -4458,7 +4458,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
 	}
 	foundItems += len(resp.Contents)
 	for i, object := range resp.Contents {
-		remote := deref(object.Key)
+		remote := *stringClone(deref(object.Key))
 		if urlEncodeListings {
 			remote, err = url.QueryUnescape(remote)
 			if err != nil {

@@ -5061,8 +5061,11 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
 		MultipartUpload: &types.CompletedMultipartUpload{
 			Parts: parts,
 		},
-		RequestPayer: req.RequestPayer,
-		UploadId:     uid,
+		RequestPayer:         req.RequestPayer,
+		SSECustomerAlgorithm: req.SSECustomerAlgorithm,
+		SSECustomerKey:       req.SSECustomerKey,
+		SSECustomerKeyMD5:    req.SSECustomerKeyMD5,
+		UploadId:             uid,
 	})
 	return f.shouldRetry(ctx, err)
 })

@@ -5911,7 +5914,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 func s3MetadataToMap(s3Meta map[string]string) map[string]string {
 	meta := make(map[string]string, len(s3Meta))
 	for k, v := range s3Meta {
-		meta[strings.ToLower(k)] = v
+		meta[strings.ToLower(k)] = *stringClone(v)
 	}
 	return meta
 }

@@ -5954,14 +5957,14 @@ func (o *Object) setMetaData(resp *s3.HeadObjectOutput) {
 			o.lastModified = *resp.LastModified
 		}
 	}
-	o.mimeType = deref(resp.ContentType)
+	o.mimeType = strings.Clone(deref(resp.ContentType))

 	// Set system metadata
-	o.storageClass = (*string)(&resp.StorageClass)
-	o.cacheControl = resp.CacheControl
-	o.contentDisposition = resp.ContentDisposition
-	o.contentEncoding = resp.ContentEncoding
-	o.contentLanguage = resp.ContentLanguage
+	o.storageClass = stringClone(string(resp.StorageClass))
+	o.cacheControl = stringClonePointer(resp.CacheControl)
+	o.contentDisposition = stringClonePointer(resp.ContentDisposition)
+	o.contentEncoding = stringClonePointer(resp.ContentEncoding)
+	o.contentLanguage = stringClonePointer(resp.ContentLanguage)

 	// If decompressing then size and md5sum are unknown
 	if o.fs.opt.Decompress && deref(o.contentEncoding) == "gzip" {

@@ -6446,8 +6449,11 @@ func (w *s3ChunkWriter) Close(ctx context.Context) (err error) {
 		MultipartUpload: &types.CompletedMultipartUpload{
 			Parts: w.completedParts,
 		},
-		RequestPayer: w.multiPartUploadInput.RequestPayer,
-		UploadId:     w.uploadID,
+		RequestPayer:         w.multiPartUploadInput.RequestPayer,
+		SSECustomerAlgorithm: w.multiPartUploadInput.SSECustomerAlgorithm,
+		SSECustomerKey:       w.multiPartUploadInput.SSECustomerKey,
+		SSECustomerKeyMD5:    w.multiPartUploadInput.SSECustomerKeyMD5,
+		UploadId:             w.uploadID,
 	})
 	return w.f.shouldRetry(ctx, err)
 })

@@ -6476,8 +6482,8 @@ func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.R
 	}

 	var s3cw *s3ChunkWriter = chunkWriter.(*s3ChunkWriter)
-	gotETag = s3cw.eTag
-	versionID = aws.String(s3cw.versionID)
+	gotETag = *stringClone(s3cw.eTag)
+	versionID = stringClone(s3cw.versionID)

 	hashOfHashes := md5.Sum(s3cw.md5s)
 	wantETag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(s3cw.completedParts))

@@ -6509,8 +6515,8 @@ func (o *Object) uploadSinglepartPutObject(ctx context.Context, req *s3.PutObjec
 	}
 	lastModified = time.Now()
 	if resp != nil {
-		etag = deref(resp.ETag)
-		versionID = resp.VersionId
+		etag = *stringClone(deref(resp.ETag))
+		versionID = stringClonePointer(resp.VersionId)
 	}
 	return etag, lastModified, versionID, nil
 }

@@ -6562,8 +6568,8 @@ func (o *Object) uploadSinglepartPresignedRequest(ctx context.Context, req *s3.P
 	if date, err := http.ParseTime(resp.Header.Get("Date")); err != nil {
 		lastModified = date
 	}
-	etag = resp.Header.Get("Etag")
-	vID := resp.Header.Get("x-amz-version-id")
+	etag = *stringClone(resp.Header.Get("Etag"))
+	vID := *stringClone(resp.Header.Get("x-amz-version-id"))
 	if vID != "" {
 		versionID = &vID
 	}
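The stringClone/stringClonePointer helpers themselves are not shown in this diff. The motivation for calls like *stringClone(deref(object.Key)) is that strings sliced out of a large decoded S3 response can share backing memory with the whole response, pinning it while listings or cached metadata are retained; copying the few strings that are kept lets the large buffer be garbage-collected. A minimal sketch consistent with how the helpers are used above (signatures assumed, not taken from the source):

import "strings"

// stringClone returns a pointer to a copy of s that shares no backing
// memory with the original string.
func stringClone(s string) *string {
	clone := strings.Clone(s)
	return &clone
}

// stringClonePointer clones the pointee, preserving nil.
func stringClonePointer(s *string) *string {
	if s == nil {
		return nil
	}
	clone := strings.Clone(*s)
	return &clone
}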
@@ -932,7 +932,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		User:            opt.User,
 		Auth:            []ssh.AuthMethod{},
 		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
-		Timeout:         f.ci.ConnectTimeout,
+		Timeout:         time.Duration(f.ci.ConnectTimeout),
 		ClientVersion:   "SSH-2.0-" + f.ci.UserAgent,
 	}
@@ -38,7 +38,7 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
 	d := &smb2.Dialer{}
 	if f.opt.UseKerberos {
-		cl, err := getKerberosClient()
+		cl, err := NewKerberosFactory().GetClient(f.opt.KerberosCCache)
 		if err != nil {
 			return nil, err
 		}
@@ -7,72 +7,132 @@ import (
 	"path/filepath"
 	"strings"
 	"sync"
+	"time"

 	"github.com/jcmturner/gokrb5/v8/client"
 	"github.com/jcmturner/gokrb5/v8/config"
 	"github.com/jcmturner/gokrb5/v8/credentials"
 )

-var (
-	kerberosClient *client.Client
-	kerberosErr    error
-	kerberosOnce   sync.Once
-)
+// KerberosFactory encapsulates dependencies and caches for Kerberos clients.
+type KerberosFactory struct {
+	// clientCache caches Kerberos clients keyed by resolved ccache path.
+	// Clients are reused unless the associated ccache file changes.
+	clientCache sync.Map // map[string]*client.Client

-// getKerberosClient returns a Kerberos client that can be used to authenticate.
-func getKerberosClient() (*client.Client, error) {
-	if kerberosClient == nil || kerberosErr == nil {
-		kerberosOnce.Do(func() {
-			kerberosClient, kerberosErr = createKerberosClient()
-		})
-	}
+	// errCache caches errors encountered when loading Kerberos clients.
+	// Prevents repeated attempts for paths that previously failed.
+	errCache sync.Map // map[string]error

-	return kerberosClient, kerberosErr
+	// modTimeCache tracks the last known modification time of ccache files.
+	// Used to detect changes and trigger credential refresh.
+	modTimeCache sync.Map // map[string]time.Time
+
+	loadCCache func(string) (*credentials.CCache, error)
+	newClient  func(*credentials.CCache, *config.Config, ...func(*client.Settings)) (*client.Client, error)
+	loadConfig func() (*config.Config, error)
 }

-// createKerberosClient creates a new Kerberos client.
-func createKerberosClient() (*client.Client, error) {
-	cfgPath := os.Getenv("KRB5_CONFIG")
-	if cfgPath == "" {
-		cfgPath = "/etc/krb5.conf"
+// NewKerberosFactory creates a new instance of KerberosFactory with default dependencies.
+func NewKerberosFactory() *KerberosFactory {
+	return &KerberosFactory{
+		loadCCache: credentials.LoadCCache,
+		newClient:  client.NewFromCCache,
+		loadConfig: defaultLoadKerberosConfig,
+	}
 }

-	cfg, err := config.Load(cfgPath)
+// GetClient returns a cached Kerberos client or creates a new one if needed.
+func (kf *KerberosFactory) GetClient(ccachePath string) (*client.Client, error) {
+	resolvedPath, err := resolveCcachePath(ccachePath)
 	if err != nil {
 		return nil, err
 	}

-	// Determine the ccache location from the environment, falling back to the
-	// default location.
-	ccachePath := os.Getenv("KRB5CCNAME")
+	stat, err := os.Stat(resolvedPath)
+	if err != nil {
+		kf.errCache.Store(resolvedPath, err)
+		return nil, err
+	}
+	mtime := stat.ModTime()
+
+	if oldMod, ok := kf.modTimeCache.Load(resolvedPath); ok {
+		if oldTime, ok := oldMod.(time.Time); ok && oldTime.Equal(mtime) {
+			if errVal, ok := kf.errCache.Load(resolvedPath); ok {
+				return nil, errVal.(error)
+			}
+			if clientVal, ok := kf.clientCache.Load(resolvedPath); ok {
+				return clientVal.(*client.Client), nil
+			}
+		}
+	}
+
+	// Load Kerberos config
+	cfg, err := kf.loadConfig()
+	if err != nil {
+		kf.errCache.Store(resolvedPath, err)
+		return nil, err
+	}
+
+	// Load ccache
+	ccache, err := kf.loadCCache(resolvedPath)
+	if err != nil {
+		kf.errCache.Store(resolvedPath, err)
+		return nil, err
+	}
+
+	// Create new client
+	cl, err := kf.newClient(ccache, cfg)
+	if err != nil {
+		kf.errCache.Store(resolvedPath, err)
+		return nil, err
+	}
+
+	// Cache and return
+	kf.clientCache.Store(resolvedPath, cl)
+	kf.errCache.Delete(resolvedPath)
+	kf.modTimeCache.Store(resolvedPath, mtime)
+	return cl, nil
+}
+
+// resolveCcachePath resolves the KRB5 ccache path.
+func resolveCcachePath(ccachePath string) (string, error) {
+	if ccachePath == "" {
+		ccachePath = os.Getenv("KRB5CCNAME")
+	}

 	switch {
 	case strings.Contains(ccachePath, ":"):
 		parts := strings.SplitN(ccachePath, ":", 2)
-		switch parts[0] {
+		prefix, path := parts[0], parts[1]
+		switch prefix {
 		case "FILE":
-			ccachePath = parts[1]
+			return path, nil
 		case "DIR":
-			primary, err := os.ReadFile(filepath.Join(parts[1], "primary"))
+			primary, err := os.ReadFile(filepath.Join(path, "primary"))
 			if err != nil {
-				return nil, err
+				return "", err
 			}
-			ccachePath = filepath.Join(parts[1], strings.TrimSpace(string(primary)))
+			return filepath.Join(path, strings.TrimSpace(string(primary))), nil
 		default:
-			return nil, fmt.Errorf("unsupported KRB5CCNAME: %s", ccachePath)
+			return "", fmt.Errorf("unsupported KRB5CCNAME: %s", ccachePath)
 		}
 	case ccachePath == "":
 		u, err := user.Current()
 		if err != nil {
-			return nil, err
+			return "", err
 		}
-
-		ccachePath = "/tmp/krb5cc_" + u.Uid
+		return "/tmp/krb5cc_" + u.Uid, nil
+	default:
+		return ccachePath, nil
 	}
-
-	ccache, err := credentials.LoadCCache(ccachePath)
-	if err != nil {
-		return nil, err
-	}
-
-	return client.NewFromCCache(ccache, cfg)
 }
+
+// defaultLoadKerberosConfig loads Kerberos config from default or env path.
+func defaultLoadKerberosConfig() (*config.Config, error) {
+	cfgPath := os.Getenv("KRB5_CONFIG")
+	if cfgPath == "" {
+		cfgPath = "/etc/krb5.conf"
+	}
+	return config.Load(cfgPath)
+}
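A usage sketch for the factory above (the ccache path is illustrative): on the cached path only resolution, a stat, and an mtime comparison happen, so calling GetClient on every dial is cheap, and a kinit that rewrites the ccache is picked up automatically:

func dialSketch() (*client.Client, error) {
	kf := NewKerberosFactory()
	// First call: loads krb5.conf and the ccache, then caches the client.
	// Later calls return the cached *client.Client until the ccache
	// file's modification time changes.
	return kf.GetClient("FILE:/tmp/krb5cc_1000")
}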
backend/smb/kerberos_test.go (new file, 142 lines)

@@ -0,0 +1,142 @@
package smb

import (
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/jcmturner/gokrb5/v8/client"
	"github.com/jcmturner/gokrb5/v8/config"
	"github.com/jcmturner/gokrb5/v8/credentials"
	"github.com/stretchr/testify/assert"
)

func TestResolveCcachePath(t *testing.T) {
	tmpDir := t.TempDir()

	// Setup: files for FILE and DIR modes
	fileCcache := filepath.Join(tmpDir, "file_ccache")
	err := os.WriteFile(fileCcache, []byte{}, 0600)
	assert.NoError(t, err)

	dirCcache := filepath.Join(tmpDir, "dir_ccache")
	err = os.Mkdir(dirCcache, 0755)
	assert.NoError(t, err)
	err = os.WriteFile(filepath.Join(dirCcache, "primary"), []byte("ticket"), 0600)
	assert.NoError(t, err)
	dirCcacheTicket := filepath.Join(dirCcache, "ticket")
	err = os.WriteFile(dirCcacheTicket, []byte{}, 0600)
	assert.NoError(t, err)

	tests := []struct {
		name          string
		ccachePath    string
		envKRB5CCNAME string
		expected      string
		expectError   bool
	}{
		{
			name:          "FILE: prefix from env",
			ccachePath:    "",
			envKRB5CCNAME: "FILE:" + fileCcache,
			expected:      fileCcache,
		},
		{
			name:          "DIR: prefix from env",
			ccachePath:    "",
			envKRB5CCNAME: "DIR:" + dirCcache,
			expected:      dirCcacheTicket,
		},
		{
			name:          "Unsupported prefix",
			ccachePath:    "",
			envKRB5CCNAME: "MEMORY:/bad/path",
			expectError:   true,
		},
		{
			name:       "Direct file path (no prefix)",
			ccachePath: "/tmp/myccache",
			expected:   "/tmp/myccache",
		},
		{
			name:          "Default to /tmp/krb5cc_<uid>",
			ccachePath:    "",
			envKRB5CCNAME: "",
			expected:      "/tmp/krb5cc_",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			t.Setenv("KRB5CCNAME", tt.envKRB5CCNAME)
			result, err := resolveCcachePath(tt.ccachePath)

			if tt.expectError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Contains(t, result, tt.expected)
			}
		})
	}
}

func TestKerberosFactory_GetClient_ReloadOnCcacheChange(t *testing.T) {
	// Create temp ccache file
	tmpFile, err := os.CreateTemp("", "krb5cc_test")
	assert.NoError(t, err)
	defer func() {
		if err := os.Remove(tmpFile.Name()); err != nil {
			t.Logf("Failed to remove temp file %s: %v", tmpFile.Name(), err)
		}
	}()

	unixPath := filepath.ToSlash(tmpFile.Name())
	ccachePath := "FILE:" + unixPath

	initialContent := []byte("CCACHE_VERSION 4\n")
	_, err = tmpFile.Write(initialContent)
	assert.NoError(t, err)
	assert.NoError(t, tmpFile.Close())

	// Setup mocks
	loadCallCount := 0
	mockLoadCCache := func(path string) (*credentials.CCache, error) {
		loadCallCount++
		return &credentials.CCache{}, nil
	}

	mockNewClient := func(cc *credentials.CCache, cfg *config.Config, opts ...func(*client.Settings)) (*client.Client, error) {
		return &client.Client{}, nil
	}

	mockLoadConfig := func() (*config.Config, error) {
		return &config.Config{}, nil
	}

	factory := &KerberosFactory{
		loadCCache: mockLoadCCache,
		newClient:  mockNewClient,
		loadConfig: mockLoadConfig,
	}

	// First call — triggers loading
	_, err = factory.GetClient(ccachePath)
	assert.NoError(t, err)
	assert.Equal(t, 1, loadCallCount, "expected 1 load call")

	// Second call — should reuse cache, no additional load
	_, err = factory.GetClient(ccachePath)
	assert.NoError(t, err)
	assert.Equal(t, 1, loadCallCount, "expected cached reuse, no new load")

	// Simulate file update
	time.Sleep(1 * time.Second) // ensure mtime changes
	err = os.WriteFile(tmpFile.Name(), []byte("CCACHE_VERSION 4\n#updated"), 0600)
	assert.NoError(t, err)

	// Third call — should detect change, reload
	_, err = factory.GetClient(ccachePath)
	assert.NoError(t, err)
	assert.Equal(t, 2, loadCallCount, "expected reload on changed ccache")
}
@@ -107,6 +107,20 @@ Set to 0 to keep connections indefinitely.
 	Help:     "Whether the server is configured to be case-insensitive.\n\nAlways true on Windows shares.",
 	Default:  true,
 	Advanced: true,
 }, {
+	Name: "kerberos_ccache",
+	Help: `Path to the Kerberos credential cache (krb5cc).
+
+Overrides the default KRB5CCNAME environment variable and allows this
+instance of the SMB backend to use a different Kerberos cache file.
+This is useful when mounting multiple SMB with different credentials
+or running in multi-user environments.
+
+Supported formats:
+- FILE:/path/to/ccache – Use the specified file.
+- DIR:/path/to/ccachedir – Use the primary file inside the specified directory.
+- /path/to/ccache – Interpreted as a file path.`,
+	Advanced: true,
+}, {
 	Name: config.ConfigEncoding,
 	Help: config.ConfigEncodingHelp,

@@ -137,6 +151,7 @@ type Options struct {
 	Domain          string      `config:"domain"`
 	SPN             string      `config:"spn"`
 	UseKerberos     bool        `config:"use_kerberos"`
+	KerberosCCache  string      `config:"kerberos_ccache"`
 	HideSpecial     bool        `config:"hide_special_share"`
 	CaseInsensitive bool        `config:"case_insensitive"`
 	IdleTimeout     fs.Duration `config:"idle_timeout"`
@@ -30,3 +30,24 @@ func TestIntegration2(t *testing.T) {
 		NilObject: (*smb.Object)(nil),
 	})
 }
+
+func TestIntegration3(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("skipping as -remote is set")
+	}
+
+	krb5Dir := t.TempDir()
+	t.Setenv("KRB5_CONFIG", filepath.Join(krb5Dir, "krb5.conf"))
+	ccache := filepath.Join(krb5Dir, "ccache")
+	t.Setenv("RCLONE_TEST_CUSTOM_CCACHE_LOCATION", ccache)
+
+	name := "TestSMBKerberosCcache"
+
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: name + ":rclone",
+		NilObject:  (*smb.Object)(nil),
+		ExtraConfig: []fstests.ExtraConfigItem{
+			{Name: name, Key: "kerberos_ccache", Value: ccache},
+		},
+	})
+}
@@ -491,8 +491,8 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
 	ApplicationCredentialName:   opt.ApplicationCredentialName,
 	ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
 	EndpointType:                swift.EndpointType(opt.EndpointType),
-	ConnectTimeout:              10 * ci.ConnectTimeout, // Use the timeouts in the transport
-	Timeout:                     10 * ci.Timeout,        // Use the timeouts in the transport
+	ConnectTimeout:              time.Duration(10 * ci.ConnectTimeout), // Use the timeouts in the transport
+	Timeout:                     time.Duration(10 * ci.Timeout),        // Use the timeouts in the transport
 	Transport:                   fshttp.NewTransport(ctx),
 	FetchUntilEmptyPage:         opt.FetchUntilEmptyPage,
 	PartialPageFetchThreshold:   opt.PartialPageFetchThreshold,
@@ -12,4 +12,5 @@
 <seb•ɑƬ•chezwam•ɖɵʈ•org>
 <allllaboutyou@gmail.com>
 <psycho@feltzv.fr>
 <afw5059@gmail.com>
+<piyushgarg80>
@@ -4,12 +4,12 @@ This script checks for unauthorized modifications in autogenerated sections of m
 It is designed to be used in a GitHub Actions workflow or a local pre-commit hook.

 Features:
-- Detects markdown files changed in the last commit.
+- Detects markdown files changed between a commit and one of its ancestors. Default is to
+  check the last commit only. When triggered on a pull request it should typically compare the
+  pull request branch head and its merge base - the commit on the main branch before it diverged.
 - Identifies modified autogenerated sections marked by specific comments.
 - Reports violations using GitHub Actions error messages.
 - Exits with a nonzero status code if unauthorized changes are found.
-
-It currently only checks the last commit.
 """

 import re

@@ -22,18 +22,18 @@ def run_git(args):
     """
     return subprocess.run(["git"] + args, stdout=subprocess.PIPE, text=True, check=True).stdout.strip()

-def get_changed_files():
+def get_changed_files(base, head):
     """
-    Retrieve a list of markdown files that were changed in the last commit.
+    Retrieve a list of markdown files that were changed between the base and head commits.
     """
-    files = run_git(["diff", "--name-only", "HEAD~1", "HEAD"]).splitlines()
+    files = run_git(["diff", "--name-only", f"{base}...{head}"]).splitlines()
     return [f for f in files if f.endswith(".md")]

-def get_diff(file):
+def get_diff(file, base, head):
     """
-    Get the diff of a given file between the last commit and the current version.
+    Get the diff of a given file between the base and head commits.
     """
-    return run_git(["diff", "-U0", "HEAD~1", "HEAD", "--", file]).splitlines()
+    return run_git(["diff", "-U0", f"{base}...{head}", "--", file]).splitlines()

 def get_file_content(ref, file):
     """

@@ -70,7 +70,7 @@ def show_error(file_name, line, message):
     """
     print(f"::error file={file_name},line={line}::{message} at {file_name} line {line}")

-def check_file(file):
+def check_file(file, base, head):
     """
     Check a markdown file for modifications in autogenerated regions.
     """

@@ -84,7 +84,7 @@ def check_file(file):

     # Entire autogenerated file check.
     if any("autogenerated - DO NOT EDIT" in l for l in new_lines[:10]):
-        if get_diff(file):
+        if get_diff(file, base, head):
             show_error(file, 1, "Autogenerated file modified")
             return True
         return False

@@ -92,7 +92,7 @@ def check_file(file):
     # Partial autogenerated regions.
     regions_new = find_regions(new_lines)
     regions_old = find_regions(old_lines)
-    diff = get_diff(file)
+    diff = get_diff(file, base, head)
     hunk_re = re.compile(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@")
     new_ln = old_ln = None

@@ -124,9 +124,15 @@ def main():
     """
     Main function that iterates over changed files and checks them for violations.
     """
+    base = "HEAD~1"
+    head = "HEAD"
+    if len(sys.argv) > 1:
+        base = sys.argv[1]
+    if len(sys.argv) > 2:
+        head = sys.argv[2]
     found = False
-    for f in get_changed_files():
-        if check_file(f):
+    for f in get_changed_files(base, head):
+        if check_file(f, base, head):
             found = True
     if found:
         sys.exit(1)
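The switch from "HEAD~1 HEAD" to "{base}...{head}" matters because a three-dot range diffs from the merge base of the two commits, so only the pull request branch's own changes are flagged, not everything that landed on the main branch since it diverged. A small Go sketch of the equivalence (branch names are illustrative):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// "base...head" is shorthand for diffing from merge-base(base, head) to head.
	threeDot, _ := exec.Command("git", "diff", "--name-only", "origin/master...HEAD").Output()
	mb, _ := exec.Command("git", "merge-base", "origin/master", "HEAD").Output()
	twoDot, _ := exec.Command("git", "diff", "--name-only", strings.TrimSpace(string(mb)), "HEAD").Output()
	fmt.Println(string(threeDot) == string(twoDot)) // expected: true
}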
bin/go-test-cache/go.mod (new file, 3 lines)

@@ -0,0 +1,3 @@
module go-test-cache

go 1.24
bin/go-test-cache/main.go (new file, 123 lines)

@@ -0,0 +1,123 @@
// This code was copied from:
// https://github.com/fastly/cli/blob/main/scripts/go-test-cache/main.go
// which in turn is based on the following script and was generated using AI.
// https://github.com/airplanedev/blog-examples/blob/main/go-test-caching/update_file_timestamps.py?ref=airplane.ghost.io
//
// REFERENCE ARTICLE:
// https://web.archive.org/web/20240308061717/https://www.airplane.dev/blog/caching-golang-tests-in-ci
//
// It updates the mtime of the files to a mtime dervived from the sha1 hash of their contents.
package main

import (
	"crypto/sha1"
	"io"
	"log"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"time"
)

const (
	bufSize    = 65536
	baseDate   = 1684178360
	timeFormat = "2006-01-02 15:04:05"
)

func main() {
	repoRoot := "."
	allDirs := make([]string, 0)

	err := filepath.Walk(repoRoot, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.IsDir() {
			dirPath := filepath.Join(repoRoot, path)
			relPath, _ := filepath.Rel(repoRoot, dirPath)

			if strings.HasPrefix(relPath, ".") {
				return nil
			}

			allDirs = append(allDirs, dirPath)
		} else {
			filePath := filepath.Join(repoRoot, path)
			relPath, _ := filepath.Rel(repoRoot, filePath)

			if strings.HasPrefix(relPath, ".") {
				return nil
			}

			sha1Hash, err := getFileSHA1(filePath)
			if err != nil {
				return err
			}

			modTime := getModifiedTime(sha1Hash)

			log.Printf("Setting modified time of file %s to %s\n", relPath, modTime.Format(timeFormat))
			err = os.Chtimes(filePath, modTime, modTime)
			if err != nil {
				return err
			}
		}

		return nil
	})
	if err != nil {
		log.Fatal("Error:", err)
	}

	sort.Slice(allDirs, func(i, j int) bool {
		return len(allDirs[i]) > len(allDirs[j]) || (len(allDirs[i]) == len(allDirs[j]) && allDirs[i] < allDirs[j])
	})

	for _, dirPath := range allDirs {
		relPath, _ := filepath.Rel(repoRoot, dirPath)

		log.Printf("Setting modified time of directory %s to %s\n", relPath, time.Unix(baseDate, 0).Format(timeFormat))
		err := os.Chtimes(dirPath, time.Unix(baseDate, 0), time.Unix(baseDate, 0))
		if err != nil {
			log.Fatal("Error:", err)
		}
	}

	log.Println("Done")
}

func getFileSHA1(filePath string) (string, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return "", err
	}
	defer file.Close()

	// G401: Use of weak cryptographic primitive
	// Disabling as the hash is used not for security reasons.
	// The hash is used as a cache key to improve test run times.
	// #nosec
	// nosemgrep: go.lang.security.audit.crypto.use_of_weak_crypto.use-of-sha1
	hash := sha1.New()
	if _, err := io.CopyBuffer(hash, file, make([]byte, bufSize)); err != nil {
		return "", err
	}

	return string(hash.Sum(nil)), nil
}

func getModifiedTime(sha1Hash string) time.Time {
	hashBytes := []byte(sha1Hash)
	lastFiveBytes := hashBytes[:5]
	lastFiveValue := int64(0)

	for _, b := range lastFiveBytes {
		lastFiveValue = (lastFiveValue << 8) + int64(b)
	}

	modTime := baseDate - (lastFiveValue % 10000)
	return time.Unix(modTime, 0)
}
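Per the referenced article, a fresh CI checkout stamps every file with the clone time, which can defeat caching that takes file modification times into account; deriving the mtime from the content hash makes timestamps reproducible across machines and checkouts. A quick property check, written as a hypothetical test beside main.go:

package main

import (
	"testing"
	"time"
)

func TestDeterministicModTime(t *testing.T) {
	h1, err := getFileSHA1("go.mod")
	if err != nil {
		t.Fatal(err)
	}
	h2, _ := getFileSHA1("go.mod")
	// Identical content must map to an identical timestamp, and by
	// construction the derived time is always at or before baseDate.
	if !getModifiedTime(h1).Equal(getModifiedTime(h2)) {
		t.Fatal("mtime not deterministic")
	}
	if getModifiedTime(h1).After(time.Unix(baseDate, 0)) {
		t.Fatal("derived mtime is after baseDate")
	}
}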
@@ -278,7 +278,7 @@ func testBisync(t *testing.T, path1, path2 string) {
 	}
 	bisync.Colors = true
 	time.Local = bisync.TZ
-	ci.FsCacheExpireDuration = 5 * time.Hour
+	ci.FsCacheExpireDuration = fs.Duration(5 * time.Hour)

 	baseDir, err := os.Getwd()
 	require.NoError(t, err, "get current directory")
@@ -55,7 +55,7 @@ type Options struct {
 	Compare            CompareOpt
 	CompareFlag        string
 	DebugName          string
-	MaxLock            time.Duration
+	MaxLock            fs.Duration
 	ConflictResolve    Prefer
 	ConflictLoser      ConflictLoserAction
 	ConflictSuffixFlag string

@@ -146,7 +146,7 @@ func init() {
 	flags.BoolVarP(cmdFlags, &Opt.Compare.NoSlowHash, "no-slow-hash", "", Opt.Compare.NoSlowHash, "Ignore listing checksums only on backends where they are slow", "")
 	flags.BoolVarP(cmdFlags, &Opt.Compare.SlowHashSyncOnly, "slow-hash-sync-only", "", Opt.Compare.SlowHashSyncOnly, "Ignore slow checksums for listings and deltas, but still consider them during sync calls.", "")
 	flags.BoolVarP(cmdFlags, &Opt.Compare.DownloadHash, "download-hash", "", Opt.Compare.DownloadHash, "Compute hash by downloading when otherwise unavailable. (warning: may be slow and use lots of data!)", "")
-	flags.DurationVarP(cmdFlags, &Opt.MaxLock, "max-lock", "", Opt.MaxLock, "Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m)", "")
+	flags.FVarP(cmdFlags, &Opt.MaxLock, "max-lock", "", "Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m)", "")
 	flags.FVarP(cmdFlags, &Opt.ConflictResolve, "conflict-resolve", "", "Automatically resolve conflicts by preferring the version that is: "+ConflictResolveList+" (default: none)", "")
 	flags.FVarP(cmdFlags, &Opt.ConflictLoser, "conflict-loser", "", "Action to take on the loser of a sync conflict (when there is a winner) or on both files (when there is no winner): "+ConflictLoserList+" (default: num)", "")
 	flags.StringVarP(cmdFlags, &Opt.ConflictSuffixFlag, "conflict-suffix", "", Opt.ConflictSuffixFlag, "Suffix to use when renaming a --conflict-loser. Can be either one string or two comma-separated strings to assign different suffixes to Path1/Path2. (default: 'conflict')", "")
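The time.Duration → fs.Duration migrations across this branch all follow the same pattern: fs.Duration is rclone's named duration type with richer flag parsing, so fields change type, pflag registration moves from DurationVarP to FVarP (fs.Duration implements the flag Value interface), and call sites that need a time.Duration convert explicitly. A simplified sketch of such a type (rclone's real fs.Duration also accepts extra forms such as "off", which plain time.ParseDuration below does not):

type Duration time.Duration

// Set implements the pflag.Value interface, which is what lets
// flags.FVarP register the flag directly.
func (d *Duration) Set(s string) error {
	parsed, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	*d = Duration(parsed)
	return nil
}

func (d Duration) String() string { return time.Duration(d).String() }

// Type is required by pflag.Value.
func (d Duration) Type() string { return "Duration" }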
@@ -14,7 +14,7 @@ import (
 	"github.com/rclone/rclone/lib/terminal"
 )

-const basicallyforever = 200 * 365 * 24 * time.Hour
+const basicallyforever = fs.Duration(200 * 365 * 24 * time.Hour)

 var stopRenewal func()

@@ -66,9 +66,9 @@ func (b *bisyncRun) removeLockFile() {
 }

 func (b *bisyncRun) setLockFileExpiration() {
-	if b.opt.MaxLock > 0 && b.opt.MaxLock < 2*time.Minute {
+	if b.opt.MaxLock > 0 && b.opt.MaxLock < fs.Duration(2*time.Minute) {
 		fs.Logf(nil, Color(terminal.YellowFg, "--max-lock cannot be shorter than 2 minutes (unless 0.) Changing --max-lock from %v to %v"), b.opt.MaxLock, 2*time.Minute)
-		b.opt.MaxLock = 2 * time.Minute
+		b.opt.MaxLock = fs.Duration(2 * time.Minute)
 	} else if b.opt.MaxLock <= 0 {
 		b.opt.MaxLock = basicallyforever
 	}

@@ -80,7 +80,7 @@ func (b *bisyncRun) renewLockFile() {
 	data.Session = b.basePath
 	data.PID = strconv.Itoa(os.Getpid())
 	data.TimeRenewed = time.Now()
-	data.TimeExpires = time.Now().Add(b.opt.MaxLock)
+	data.TimeExpires = time.Now().Add(time.Duration(b.opt.MaxLock))

 	// save data file
 	df, err := os.Create(b.lockFile)

@@ -131,7 +131,7 @@ func (b *bisyncRun) startLockRenewal() func() {
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		ticker := time.NewTicker(b.opt.MaxLock - time.Minute)
+		ticker := time.NewTicker(time.Duration(b.opt.MaxLock) - time.Minute)
 		for {
 			select {
 			case <-ticker.C:
@@ -138,7 +138,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
 	if b.SyncCI != nil {
 		fs.Infoc(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early."))
 		b.SyncCI.MaxTransfer = 1
-		b.SyncCI.MaxDuration = 1 * time.Second
+		b.SyncCI.MaxDuration = fs.Duration(1 * time.Second)
 		b.SyncCI.CutoffMode = fs.CutoffModeSoft
 		gracePeriod := 30 * time.Second // TODO: flag to customize this?
 		if !waitFor("Canceling Sync if not done in", gracePeriod, func() bool { return b.CleanupCompleted }) {

@@ -376,8 +376,8 @@ func (b *bisyncRun) saveQueue(files bilib.Names, jobName string) error {
 	return files.Save(queueFile)
 }

-func naptime(totalWait time.Duration) {
-	expireTime := time.Now().Add(totalWait)
+func naptime(totalWait fs.Duration) {
+	expireTime := time.Now().Add(time.Duration(totalWait))
 	fs.Logf(nil, "will retry in %v at %v", totalWait, expireTime.Format("2006-01-02 15:04:05 MST"))
 	for i := 0; time.Until(expireTime) > 0; i++ {
 		if i > 0 && i%10 == 0 {
@@ -73,7 +73,7 @@ you what happened to it. These are reminiscent of diff files.
 - |* path| means path was present in source and destination but different.
 - |! path| means there was an error reading or hashing the source or dest.

-The default number of parallel checks is 8. See the [--checkers=N](/docs/#checkers-n)
+The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int)
 option for more information.
 `, "|", "`")
@@ -288,7 +288,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
 			accounting.GlobalStats().ResetErrors()
 		}
 		if ci.RetriesInterval > 0 {
-			time.Sleep(ci.RetriesInterval)
+			time.Sleep(time.Duration(ci.RetriesInterval))
 		}
 	}
 	stopStats()
@@ -240,7 +240,7 @@ to run as the SYSTEM account. A third alternative is to use the
 Read more in the [install documentation](https://rclone.org/install/).
 Note that when running rclone as another user, it will not use
 the configuration file from your profile unless you tell it to
-with the [`--config`](https://rclone.org/docs/#config-config-file) option.
+with the [`--config`](https://rclone.org/docs/#config-string) option.
 Note also that it is now the SYSTEM account that will have the owner
 permissions, and other accounts will have permissions according to the
 group or others scopes. As mentioned above, these will then not get the
@@ -5,6 +5,8 @@ import (
 	"context"
 
 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
@@ -35,6 +37,11 @@ implement this command directly, in which case ` + "`--checkers`" + ` will be ig
 		cmd.CheckArgs(1, 1, command, args)
 		fdst := cmd.NewFsDir(args)
 		cmd.Run(true, false, command, func() error {
+			ctx := context.Background()
+			fi := filter.GetConfig(ctx)
+			if !fi.InActive() {
+				fs.Fatalf(nil, "filters are not supported with purge (purge will delete everything unconditionally)")
+			}
 			return operations.Purge(context.Background(), fdst, "")
 		})
 	},
@@ -14,13 +14,13 @@ import (
 )
 
 var (
-	pollInterval = 10 * time.Second
+	pollInterval = fs.Duration(10 * time.Second)
 )
 
 func init() {
 	test.Command.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.DurationVarP(cmdFlags, &pollInterval, "poll-interval", "", pollInterval, "Time to wait between polling for changes", "")
+	flags.FVarP(cmdFlags, &pollInterval, "poll-interval", "", "Time to wait between polling for changes", "")
 }
 
 var commandDefinition = &cobra.Command{
@@ -39,7 +39,7 @@ var commandDefinition = &cobra.Command{
 		if do := features.ChangeNotify; do != nil {
 			pollChan := make(chan time.Duration)
 			do(ctx, changeNotify, pollChan)
-			pollChan <- pollInterval
+			pollChan <- time.Duration(pollInterval)
 			fs.Logf(nil, "Waiting for changes, polling every %v", pollInterval)
 		} else {
 			return errors.New("poll-interval is not supported by this remote")
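The `flags.DurationVarP` to `flags.FVarP` swap above is the command-line half of the change: instead of binding a raw `time.Duration`, the flag registers a value that parses and formats itself. A rough sketch of the interface this relies on, assuming `fs.Duration` provides the pflag-style `String`/`Set`/`Type` methods (rclone's `fs.Flagger`); the parser below uses plain `time.ParseDuration`, whereas rclone's real one also accepts forms like `1d` and `off`:

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"
)

// Duration stands in for fs.Duration.
type Duration time.Duration

// String, Set and Type make *Duration a pflag.Value, which is what
// FVarP-style registration requires.
func (d *Duration) String() string { return time.Duration(*d).String() }

func (d *Duration) Set(s string) error {
	parsed, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	*d = Duration(parsed)
	return nil
}

func (d *Duration) Type() string { return "Duration" }

func main() {
	pollInterval := Duration(10 * time.Second)
	flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError)
	flagSet.VarP(&pollInterval, "poll-interval", "", "Time to wait between polling for changes")
	_ = flagSet.Parse([]string{"--poll-interval", "30s"})
	fmt.Println("poll interval:", time.Duration(pollInterval))
}
```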
@@ -40,7 +40,7 @@ var (
 	checkStreaming bool
 	checkBase32768 bool
 	all bool
-	uploadWait time.Duration
+	uploadWait fs.Duration
 	positionLeftRe = regexp.MustCompile(`(?s)^(.*)-position-left-([[:xdigit:]]+)$`)
 	positionMiddleRe = regexp.MustCompile(`(?s)^position-middle-([[:xdigit:]]+)-(.*)-$`)
 	positionRightRe = regexp.MustCompile(`(?s)^position-right-([[:xdigit:]]+)-(.*)$`)
@@ -52,7 +52,7 @@ func init() {
 	flags.StringVarP(cmdFlags, &writeJSON, "write-json", "", "", "Write results to file", "")
 	flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", false, "Check UTF-8 Normalization", "")
 	flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", false, "Check control characters", "")
-	flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file", "")
+	flags.FVarP(cmdFlags, &uploadWait, "upload-wait", "", "Wait after writing a file", "")
 	flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", false, "Check max filename length", "")
 	flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", false, "Check uploads with indeterminate file size", "")
 	flags.BoolVarP(cmdFlags, &checkBase32768, "check-base32768", "", false, "Check can store all possible base32768 characters", "")
@@ -204,7 +204,7 @@ func (r *results) writeFile(path string) (fs.Object, error) {
 	src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
 	obj, err := r.f.Put(r.ctx, bytes.NewBufferString(contents), src)
 	if uploadWait > 0 {
-		time.Sleep(uploadWait)
+		time.Sleep(time.Duration(uploadWait))
 	}
 	return obj, err
 }
@@ -422,7 +422,7 @@ put them back in again.` >}}
 * Dov Murik <dov.murik@gmail.com>
 * Ameer Dawood <ameer1234567890@gmail.com>
 * Dan Hipschman <dan.hipschman@opendoor.com>
-* Josh Soref <jsoref@users.noreply.github.com>
+* Josh Soref <jsoref@users.noreply.github.com> <2119212+jsoref@users.noreply.github.com>
 * David <david@staron.nl>
 * Ingo <ingo@hoffmann.cx>
 * Adam Plánský <adamplansky@users.noreply.github.com> <adamplansky@gmail.com>
@@ -637,7 +637,7 @@ put them back in again.` >}}
 * anonion <aman207@users.noreply.github.com>
 * Ryan Morey <4590343+rmorey@users.noreply.github.com>
 * Simon Bos <simonbos9@gmail.com>
-* YFdyh000 <yfdyh000@gmail.com> * Josh Soref <2119212+jsoref@users.noreply.github.com>
+* YFdyh000 <yfdyh000@gmail.com>
 * Øyvind Heddeland Instefjord <instefjord@outlook.com>
 * Dmitry Deniskin <110819396+ddeniskin@users.noreply.github.com>
 * Alexander Knorr <106825+opexxx@users.noreply.github.com>
@@ -788,7 +788,6 @@ put them back in again.` >}}
 * Adithya Kumar <akumar42@protonmail.com>
 * Tayo-pasedaRJ <138471223+Tayo-pasedaRJ@users.noreply.github.com>
 * Peter Kreuser <logo@kreuser.name>
-* Piyush <piyushgarg80>
 * fotile96 <fotile96@users.noreply.github.com>
 * Luc Ritchie <luc.ritchie@gmail.com>
 * cynful <cynful@users.noreply.github.com>
@@ -991,3 +990,5 @@ put them back in again.` >}}
 * Davide Bizzarri <davide.bizzarri@willhaben.at>
 * Ross Smith II <ross@smithii.com>
 * Vikas Bhansali <64532198+vibhansa-msft@users.noreply.github.com>
+* Sudipto Baral <sudiptobaral.me@gmail.com>
+* Sam Pegg <samrpegg@gmail.com>
@@ -635,7 +635,7 @@ directory separators.) To address this particular issue, an additional
 `2006-01-02 0304PM`.
 
 Note that `--conflict-suffix` is entirely separate from rclone's main
-[`--sufix`](/docs/#suffix-suffix) flag. This is intentional, as users may wish
+[`--sufix`](/docs/#suffix-string) flag. This is intentional, as users may wish
 to use both flags simultaneously, if also using
 [`--backup-dir`](#backup-dir1-and-backup-dir2).
@@ -810,7 +810,7 @@ without requiring the user to get involved and run a `--resync`. (See also:
 
 ### --backup-dir1 and --backup-dir2
 
-As of `v1.66`, [`--backup-dir`](/docs/#backup-dir-dir) is supported in bisync.
+As of `v1.66`, [`--backup-dir`](/docs/#backup-dir-string) is supported in bisync.
 Because `--backup-dir` must be a non-overlapping path on the same remote,
 Bisync has introduced new `--backup-dir1` and `--backup-dir2` flags to support
 separate backup-dirs for `Path1` and `Path2` (bisyncing between different
@@ -841,7 +841,7 @@ In the event of a [rename due to a sync conflict](#conflict-loser), the
 rename is not considered a delete, unless a previous conflict with the same
 name already exists and would get overwritten.
 
-See also: [`--suffix`](/docs/#suffix-suffix),
+See also: [`--suffix`](/docs/#suffix-string),
 [`--suffix-keep-extension`](/docs/#suffix-keep-extension)
 
 ## Operation
@@ -1831,7 +1831,7 @@ about _Unison_ and synchronization in general.
 * Final listings are now generated from sync results, to avoid needing to re-list
 * Bisync is now much more resilient to changes that happen during a bisync run, and far less prone to critical errors / undetected changes
 * Bisync is now capable of rolling a file listing back in cases of uncertainty, essentially marking the file as needing to be rechecked next time.
-* A few basic terminal colors are now supported, controllable with [`--color`](/docs/#color-when) (`AUTO`|`NEVER`|`ALWAYS`)
+* A few basic terminal colors are now supported, controllable with [`--color`](/docs/#color) (`AUTO`|`NEVER`|`ALWAYS`)
 * Initial listing snapshots of Path1 and Path2 are now generated concurrently, using the same "march" infrastructure as `check` and `sync`,
 for performance improvements and less [risk of error](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=4.%20Listings%20should%20alternate%20between%20paths%20to%20minimize%20errors).
 * Fixed handling of unicode normalization and case insensitivity, support for [`--fix-case`](/docs/#fix-case), [`--ignore-case-sync`](/docs/#ignore-case-sync), [`--no-unicode-normalization`](/docs/#no-unicode-normalization)
@@ -5,6 +5,25 @@ description: "Rclone Changelog"
 
 # Changelog
 
+## v1.70.3 - 2025-07-09
+
+[See commits](https://github.com/rclone/rclone/compare/v1.70.2...v1.70.3)
+
+* Bug Fixes
+    * check: Fix difference report (was reporting error counts) (albertony)
+    * march: Fix deadlock when using `--no-traverse` (Nick Craig-Wood)
+    * doc fixes (albertony, Nick Craig-Wood)
+* Azure Blob
+    * Fix server side copy error "requires exactly one scope" (Nick Craig-Wood)
+* B2
+    * Fix finding objects when using `--b2-version-at` (Davide Bizzarri)
+* Linkbox
+    * Fix upload error "user upload file not exist" (Nick Craig-Wood)
+* Pikpak
+    * Improve error handling for missing links and unrecoverable 500s (wiserain)
+* WebDAV
+    * Fix setting modtime to that of local object instead of remote (WeidiDeng)
+
 ## v1.70.2 - 2025-06-27
 
 [See commits](https://github.com/rclone/rclone/compare/v1.70.1...v1.70.2)
@@ -49,7 +68,7 @@ description: "Rclone Changelog"
 * New Features
     * Add [`--max-connections`](/docs/#max-connections-n) to control maximum backend concurrency (Nick Craig-Wood)
     * Add [`--max-buffer-memory`](/docs/#max-buffer-memory) to limit total buffer memory usage (Nick Craig-Wood)
-    * Add transform library and [`--name-transform`](/docs/#name-transform-command-xxxx) flag (nielash)
+    * Add transform library and [`--name-transform`](/docs/#name-transform-stringarray) flag (nielash)
     * sync: Implement [`--list-cutoff`](/docs/#list-cutoff) to allow on disk sorting for reduced memory use (Nick Craig-Wood)
     * accounting: Add listed stat for number of directory entries listed (Nick Craig-Wood)
     * backend: Skip hash calculation when the hashType is None (Oleksiy Stashok)
@@ -758,7 +777,7 @@ description: "Rclone Changelog"
 * Final listings are now generated from sync results, to avoid needing to re-list (nielash)
 * Bisync is now much more resilient to changes that happen during a bisync run, and far less prone to critical errors / undetected changes (nielash)
 * Bisync is now capable of rolling a file listing back in cases of uncertainty, essentially marking the file as needing to be rechecked next time. (nielash)
-* A few basic terminal colors are now supported, controllable with [`--color`](/docs/#color-when) (`AUTO`|`NEVER`|`ALWAYS`) (nielash)
+* A few basic terminal colors are now supported, controllable with [`--color`](/docs/#color-autoneveralways) (`AUTO`|`NEVER`|`ALWAYS`) (nielash)
 * Initial listing snapshots of Path1 and Path2 are now generated concurrently, using the same "march" infrastructure as `check` and `sync`, for performance improvements and less risk of error. (nielash)
 * `--resync` is now much more efficient (especially for users of `--create-empty-src-dirs`) (nielash)
 * Google Docs (and other files of unknown size) are now supported (with the same options as in `sync`) (nielash)
@@ -770,7 +789,7 @@ instead of of `--size-only`, when `check` is not available.
 * A new `--max-lock` setting allows lock files to automatically renew and expire, for better automatic recovery when a run is interrupted. (nielash)
 * Bisync now supports auto-resolving sync conflicts and customizing rename behavior with new [`--conflict-resolve`](#conflict-resolve), [`--conflict-loser`](#conflict-loser), and [`--conflict-suffix`](#conflict-suffix) flags. (nielash)
 * A new [`--resync-mode`](#resync-mode) flag allows more control over which version of a file gets kept during a `--resync`. (nielash)
-* Bisync now supports [`--retries`](/docs/#retries-int) and [`--retries-sleep`](/docs/#retries-sleep-time) (when [`--resilient`](#resilient) is set.) (nielash)
+* Bisync now supports [`--retries`](/docs/#retries-int) and [`--retries-sleep`](/docs/#retries-sleep-duration) (when [`--resilient`](#resilient) is set.) (nielash)
 * Clarify file operation directions in dry-run logs (Kyle Reynolds)
 * Local
     * Fix cleanRootPath on Windows after go1.21.4 stdlib update (nielash)
@@ -3021,7 +3040,7 @@ instead of of `--size-only`, when `check` is not available.
 * New Features
     * The [VFS layer](/commands/rclone_mount/#vfs-virtual-file-system) was heavily reworked for this release - see below for more details
     * Interactive mode [-i/--interactive](/docs/#interactive) for destructive operations (fishbullet)
-    * Add [--bwlimit-file](/docs/#bwlimit-file-bandwidth-spec) flag to limit speeds of individual file transfers (Nick Craig-Wood)
+    * Add [--bwlimit-file](/docs/#bwlimit-file-bwtimetable) flag to limit speeds of individual file transfers (Nick Craig-Wood)
     * Transfers are sorted by start time in the stats and progress output (Max Sum)
     * Make sure backends expand `~` and environment vars in file names they use (Nick Craig-Wood)
     * Add [--refresh-times](/docs/#refresh-times) flag to set modtimes on hashless backends (Nick Craig-Wood)
docs/content/docs.md: 1005 changes (file diff suppressed because it is too large)
@@ -233,7 +233,7 @@ Restart-Service hns
 It is likely you have more than 10,000 files that need to be
 synced. By default, rclone only gets 10,000 files ahead in a sync so as
 not to use up too much memory. You can change this default with the
-[--max-backlog](/docs/#max-backlog-n) flag.
+[--max-backlog](/docs/#max-backlog-int) flag.
 
 ### Rclone is using too much memory or appears to have a memory leak
@@ -99,6 +99,21 @@ excess files in the directory.
 
     rclone sync --interactive /home/local/directory remote:dir
 
+### Hashes
+
+In December 2024 `files.com` started [supporting more checksums](https://www.files.com/blog/2024/11/01/new-modern-checksum-options-now-available-with-opt).
+
+However if you want hashes you **must** enable them in your site by
+visiting the "File Integrity" section in "Data Governance" to
+configure your settings and select the checksum algorithms that meet
+your requirements.
+
+Rclone currently only support CRC32 and MD5 for use in syncing but
+selecting more checksums will not affect rclone's operations.
+
+For use with rclone, selecting at least MD5 is recommended so rclone
+can do an end to end integrity check.
+
 {{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/filescom/filescom.go then run make backenddocs" >}}
 ### Standard options
@@ -683,7 +683,7 @@ Default units are `KiB` but abbreviations `B`, `K`, `M`, `G`, `T` or `P` are val
 E.g. `rclone ls remote: --min-size 50k` lists files on `remote:` of 50 KiB
 size or larger.
 
-See [the size option docs](/docs/#size-option) for more info.
+See [the size option docs](/docs/#size-options) for more info.
 
 ### `--max-size` - Don't transfer any file larger than this
 
@@ -693,7 +693,7 @@ Default units are `KiB` but abbreviations `B`, `K`, `M`, `G`, `T` or `P` are val
 E.g. `rclone ls remote: --max-size 1G` lists files on `remote:` of 1 GiB
 size or smaller.
 
-See [the size option docs](/docs/#size-option) for more info.
+See [the size option docs](/docs/#size-options) for more info.
 
 ### `--max-age` - Don't transfer any file older than this
 
@@ -704,7 +704,7 @@ Controls the maximum age of files within the scope of an rclone command.
 E.g. `rclone ls remote: --max-age 2d` lists files on `remote:` of 2 days
 old or less.
 
-See [the time option docs](/docs/#time-option) for valid formats.
+See [the time option docs](/docs/#time-options) for valid formats.
 
 ### `--min-age` - Don't transfer any file younger than this
 
@@ -716,7 +716,7 @@ Controls the minimum age of files within the scope of an rclone command.
 E.g. `rclone ls remote: --min-age 2d` lists files on `remote:` of 2 days
 old or more.
 
-See [the time option docs](/docs/#time-option) for valid formats.
+See [the time option docs](/docs/#time-options) for valid formats.
 
 ### `--hash-filter` - Deterministically select a subset of files {#hash-filter}
@@ -491,9 +491,9 @@ the locations that rclone will use.
 
 To override them set the corresponding options (as command-line arguments, or as
 [environment variables](https://rclone.org/docs/#environment-variables)):
-- [--config](https://rclone.org/docs/#config-config-file)
-- [--cache-dir](https://rclone.org/docs/#cache-dir-dir)
-- [--temp-dir](https://rclone.org/docs/#temp-dir-dir)
+- [--config](https://rclone.org/docs/#config-string)
+- [--cache-dir](https://rclone.org/docs/#cache-dir-string)
+- [--temp-dir](https://rclone.org/docs/#temp-dir-string)
 
 ## Autostart
@@ -546,7 +546,7 @@ NOTE: Remember that when rclone runs as the `SYSTEM` user, the user profile
 that it sees will not be yours. This means that if you normally run rclone with
 configuration file in the default location, to be able to use the same configuration
 when running as the system user you must explicitly tell rclone where to find
-it with the [`--config`](https://rclone.org/docs/#config-config-file) option,
+it with the [`--config`](https://rclone.org/docs/#config-string) option,
 or else it will look in the system users profile path (`C:\Windows\System32\config\systemprofile`).
 To test your command manually from a Command Prompt, you can run it with
 the [PsExec](https://docs.microsoft.com/en-us/sysinternals/downloads/psexec)
@@ -257,7 +257,7 @@ flag.
 Note that Jottacloud requires the MD5 hash before upload so if the
 source does not have an MD5 checksum then the file will be cached
 temporarily on disk (in location given by
-[--temp-dir](/docs/#temp-dir-dir)) before it is uploaded.
+[--temp-dir](/docs/#temp-dir-string)) before it is uploaded.
 Small files will be cached in memory - see the
 [--jottacloud-md5-memory-limit](#jottacloud-md5-memory-limit) flag.
 When uploading from local disk the source checksum is always available,
@@ -516,7 +516,7 @@ upon backend-specific capabilities.
 | HiDrive | Yes | Yes | Yes | Yes | No | No | Yes | No | No | No | Yes |
 | HTTP | No | No | No | No | No | No | No | No | No | No | Yes |
 | iCloud Drive | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
-| ImageKit | Yes | Yes | Yes | No | No | No | No | No | No | No | Yes |
+| ImageKit | Yes | No | Yes | No | No | No | No | No | No | No | Yes |
 | Internet Archive | No | Yes | No | No | Yes | Yes | No | No | Yes | Yes | No |
 | Jottacloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
 | Koofr | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
@@ -546,8 +546,8 @@ And this is equivalent to `/tmp/dir`
 
 ```
 {
-    type = "local",
-    _root = "/tmp/dir"
+    "type": "local",
+    "_root": "/tmp/dir"
 }
 ```
@@ -2359,8 +2359,8 @@ e.g.
         "fs": "/tmp",
         "remote": 3
     },
-    "status": 400
-    "path": "operations/rmdir",
+    "status": 400,
+    "path": "operations/rmdir"
 }
 ```
@@ -56,6 +56,7 @@ off donation.
 
 Thank you very much to our sponsors:
 
+{{< sponsor src="/img/logos/backblaze.svg" width="300" height="200" title="Visit our sponsor Backblaze" link="https://www.backblaze.com/cloud-storage-rclonead?utm_source=rclone&utm_medium=paid&utm_campaign=rclone-website-20250715">}}
 {{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}}
 {{< sponsor src="/img/logos/filescom-enterprise-grade-workflows.png" width="300" height="200" title="Start Your Free Trial Today" link="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone">}}
 {{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}
@@ -10,6 +10,15 @@
     </div>
 {{end}}
 
+<div class="card">
+    <div class="card-header" style="padding: 5px 15px;">
+        Platinum Sponsor
+    </div>
+    <div class="card-body">
+        <a href="https://www.backblaze.com/cloud-storage-rclonead?utm_source=rclone&utm_medium=paid&utm_campaign=rclone-website-20250715" target="_blank" rel="noopener" title="Visit rclone's sponsor Backblaze"><img src="/img/logos/backblaze.svg"></a><br />
+    </div>
+</div>
+
 <div class="card">
     <div class="card-header" style="padding: 5px 15px;">
         Gold Sponsor
@@ -304,7 +304,7 @@ func (acc *Account) ServerSideTransferEnd(n int64) {
 	acc.stats.Bytes(n)
 }
 
-// serverSideEnd accounts for non specific server side data
+// serverSideEnd accounts for non specific server-side data
 func (acc *Account) serverSideEnd(n int64) {
 	// Account for bytes unless we are checking
 	if !acc.checking {
@@ -312,13 +312,13 @@ func (acc *Account) serverSideEnd(n int64) {
 	}
 }
 
-// ServerSideCopyEnd accounts for a read of n bytes in a sever side copy
+// ServerSideCopyEnd accounts for a read of n bytes in a server-side copy
 func (acc *Account) ServerSideCopyEnd(n int64) {
 	acc.stats.AddServerSideCopy(n)
 	acc.serverSideEnd(n)
 }
 
-// ServerSideMoveEnd accounts for a read of n bytes in a sever side move
+// ServerSideMoveEnd accounts for a read of n bytes in a server-side move
 func (acc *Account) ServerSideMoveEnd(n int64) {
 	acc.stats.AddServerSideMove(n)
 	acc.serverSideEnd(n)
fs/cache/cache.go: 5 changes (vendored)
@@ -6,6 +6,7 @@ import (
 	"runtime"
 	"strings"
 	"sync"
+	"time"
 
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/filter"
@@ -25,8 +26,8 @@ func createOnFirstUse() {
 	once.Do(func() {
 		ci := fs.GetConfig(context.Background())
 		c = cache.New()
-		c.SetExpireDuration(ci.FsCacheExpireDuration)
-		c.SetExpireInterval(ci.FsCacheExpireInterval)
+		c.SetExpireDuration(time.Duration(ci.FsCacheExpireDuration))
+		c.SetExpireInterval(time.Duration(ci.FsCacheExpireInterval))
 		c.SetFinalizer(func(value any) {
 			if s, ok := value.(fs.Shutdowner); ok {
 				_ = fs.CountError(context.Background(), s.Shutdown(context.Background()))
fs/config.go: 20 changes
@@ -570,12 +570,12 @@ type ConfigInfo struct {
 	IgnoreTimes bool `config:"ignore_times"`
 	IgnoreExisting bool `config:"ignore_existing"`
 	IgnoreErrors bool `config:"ignore_errors"`
-	ModifyWindow time.Duration `config:"modify_window"`
+	ModifyWindow Duration `config:"modify_window"`
 	Checkers int `config:"checkers"`
 	Transfers int `config:"transfers"`
-	ConnectTimeout time.Duration `config:"contimeout"` // Connect timeout
-	Timeout time.Duration `config:"timeout"` // Data channel timeout
-	ExpectContinueTimeout time.Duration `config:"expect_continue_timeout"`
+	ConnectTimeout Duration `config:"contimeout"` // Connect timeout
+	Timeout Duration `config:"timeout"` // Data channel timeout
+	ExpectContinueTimeout Duration `config:"expect_continue_timeout"`
 	Dump DumpFlags `config:"dump"`
 	InsecureSkipVerify bool `config:"no_check_certificate"` // Skip server certificate verification
 	DeleteMode DeleteMode `config:"delete_mode"`
@@ -584,7 +584,7 @@ type ConfigInfo struct {
 	TrackRenames bool `config:"track_renames"` // Track file renames.
 	TrackRenamesStrategy string `config:"track_renames_strategy"` // Comma separated list of strategies used to track renames
 	Retries int `config:"retries"` // High-level retries
-	RetriesInterval time.Duration `config:"retries_sleep"`
+	RetriesInterval Duration `config:"retries_sleep"`
 	LowLevelRetries int `config:"low_level_retries"`
 	UpdateOlder bool `config:"update"` // Skip files that are newer on the destination
 	NoGzip bool `config:"no_gzip_encoding"` // Disable compression
@@ -623,7 +623,7 @@ type ConfigInfo struct {
 	PasswordCommand SpaceSepList `config:"password_command"`
 	UseServerModTime bool `config:"use_server_modtime"`
 	MaxTransfer SizeSuffix `config:"max_transfer"`
-	MaxDuration time.Duration `config:"max_duration"`
+	MaxDuration Duration `config:"max_duration"`
 	CutoffMode CutoffMode `config:"cutoff_mode"`
 	MaxBacklog int `config:"max_backlog"`
 	MaxStatsGroups int `config:"max_stats_groups"`
@@ -652,11 +652,11 @@ type ConfigInfo struct {
 	RefreshTimes bool `config:"refresh_times"`
 	NoConsole bool `config:"no_console"`
 	TrafficClass uint8 `config:"traffic_class"`
-	FsCacheExpireDuration time.Duration `config:"fs_cache_expire_duration"`
-	FsCacheExpireInterval time.Duration `config:"fs_cache_expire_interval"`
+	FsCacheExpireDuration Duration `config:"fs_cache_expire_duration"`
+	FsCacheExpireInterval Duration `config:"fs_cache_expire_interval"`
 	DisableHTTP2 bool `config:"disable_http2"`
 	HumanReadable bool `config:"human_readable"`
-	KvLockTime time.Duration `config:"kv_lock_time"` // maximum time to keep key-value database locked by process
+	KvLockTime Duration `config:"kv_lock_time"` // maximum time to keep key-value database locked by process
 	DisableHTTPKeepAlives bool `config:"disable_http_keep_alives"`
 	Metadata bool `config:"metadata"`
 	ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
@@ -766,7 +766,7 @@ func InitialLogLevel() LogLevel {
 // TimeoutOrInfinite returns ci.Timeout if > 0 or infinite otherwise
 func (ci *ConfigInfo) TimeoutOrInfinite() time.Duration {
 	if ci.Timeout > 0 {
-		return ci.Timeout
+		return time.Duration(ci.Timeout)
 	}
 	return ModTimeNotSupported
 }
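The `ConfigInfo` hunks are the storage half of the contract: the struct keeps the flag-friendly `Duration` type, and each consumer converts once at the boundary, as `TimeoutOrInfinite` now does. A simplified sketch of that accessor idiom; `Duration` and `ModTimeNotSupported` below are stand-in assumptions mirroring the diff, not rclone's real definitions:

```go
package main

import (
	"fmt"
	"time"
)

// Duration stands in for fs.Duration.
type Duration time.Duration

// ModTimeNotSupported stands in for fs.ModTimeNotSupported, an
// arbitrarily large "effectively infinite" duration.
const ModTimeNotSupported = 100 * 365 * 24 * time.Hour

// ConfigInfo is trimmed to the single field this sketch needs.
type ConfigInfo struct {
	Timeout Duration
}

// TimeoutOrInfinite converts at the boundary so callers keep receiving
// a plain time.Duration.
func (ci *ConfigInfo) TimeoutOrInfinite() time.Duration {
	if ci.Timeout > 0 {
		return time.Duration(ci.Timeout)
	}
	return ModTimeNotSupported
}

func main() {
	ci := &ConfigInfo{Timeout: Duration(5 * time.Minute)}
	fmt.Println(ci.TimeoutOrInfinite()) // 5m0s
	ci.Timeout = 0
	fmt.Println(ci.TimeoutOrInfinite()) // effectively infinite
}
```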
fs/fs.go: 2 changes
@@ -77,7 +77,7 @@ func FileExists(ctx context.Context, fs Fs, remote string) (bool, error) {
 // GetModifyWindow calculates the maximum modify window between the given Fses
 // and the Config.ModifyWindow parameter.
 func GetModifyWindow(ctx context.Context, fss ...Info) time.Duration {
-	window := GetConfig(ctx).ModifyWindow
+	window := time.Duration(GetConfig(ctx).ModifyWindow)
 	for _, f := range fss {
 		if f != nil {
 			precision := f.Precision()
@@ -27,10 +27,10 @@ func NewDialer(ctx context.Context) *Dialer {
 	ci := fs.GetConfig(ctx)
 	dialer := &Dialer{
 		Dialer: net.Dialer{
-			Timeout: ci.ConnectTimeout,
+			Timeout: time.Duration(ci.ConnectTimeout),
 			KeepAlive: 30 * time.Second,
 		},
-		timeout: ci.Timeout,
+		timeout: time.Duration(ci.Timeout),
 		tclass: int(ci.TrafficClass),
 	}
 	if ci.BindAddr != nil {
@@ -58,8 +58,8 @@ func NewTransportCustom(ctx context.Context, customize func(*http.Transport)) ht
 	t.Proxy = http.ProxyFromEnvironment
 	t.MaxIdleConnsPerHost = 2 * (ci.Checkers + ci.Transfers + 1)
 	t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost
-	t.TLSHandshakeTimeout = ci.ConnectTimeout
-	t.ResponseHeaderTimeout = ci.Timeout
+	t.TLSHandshakeTimeout = time.Duration(ci.ConnectTimeout)
+	t.ResponseHeaderTimeout = time.Duration(ci.Timeout)
 	t.DisableKeepAlives = ci.DisableHTTPKeepAlives
 
 	// TLS Config
@@ -109,7 +109,7 @@ func NewTransportCustom(ctx context.Context, customize func(*http.Transport)) ht
 		return NewDialer(ctx).DialContext(reqCtx, network, addr)
 	}
 	t.IdleConnTimeout = 60 * time.Second
-	t.ExpectContinueTimeout = ci.ExpectContinueTimeout
+	t.ExpectContinueTimeout = time.Duration(ci.ExpectContinueTimeout)
 
 	if ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
 		fs.Debugf(nil, "You have specified to dump information. Please be noted that the "+
@@ -148,7 +148,7 @@ func (jobs *Jobs) kickExpire() {
 	jobs.mu.Lock()
 	defer jobs.mu.Unlock()
 	if !jobs.expireRunning {
-		time.AfterFunc(jobs.opt.JobExpireInterval, jobs.Expire)
+		time.AfterFunc(time.Duration(jobs.opt.JobExpireInterval), jobs.Expire)
 		jobs.expireRunning = true
 	}
 }
@@ -160,13 +160,13 @@ func (jobs *Jobs) Expire() {
 	now := time.Now()
 	for ID, job := range jobs.jobs {
 		job.mu.Lock()
-		if job.Finished && now.Sub(job.EndTime) > jobs.opt.JobExpireDuration {
+		if job.Finished && now.Sub(job.EndTime) > time.Duration(jobs.opt.JobExpireDuration) {
 			delete(jobs.jobs, ID)
 		}
 		job.mu.Unlock()
 	}
 	if len(jobs.jobs) != 0 {
-		time.AfterFunc(jobs.opt.JobExpireInterval, jobs.Expire)
+		time.AfterFunc(time.Duration(jobs.opt.JobExpireInterval), jobs.Expire)
 		jobs.expireRunning = true
 	} else {
 		jobs.expireRunning = false
@@ -24,7 +24,7 @@ func TestNewJobs(t *testing.T) {
 func TestJobsKickExpire(t *testing.T) {
 	testy.SkipUnreliable(t)
 	jobs := newJobs()
-	jobs.opt.JobExpireInterval = time.Millisecond
+	jobs.opt.JobExpireInterval = fs.Duration(time.Millisecond)
 	assert.Equal(t, false, jobs.expireRunning)
 	jobs.kickExpire()
 	jobs.mu.Lock()
@@ -41,7 +41,7 @@ func TestJobsExpire(t *testing.T) {
 	ctx := context.Background()
 	wait := make(chan struct{})
 	jobs := newJobs()
-	jobs.opt.JobExpireInterval = time.Millisecond
+	jobs.opt.JobExpireInterval = fs.Duration(time.Millisecond)
 	assert.Equal(t, false, jobs.expireRunning)
 	var gotJobID int64
 	var gotJob *Job
@@ -64,7 +64,7 @@ func TestJobsExpire(t *testing.T) {
 	assert.Equal(t, 1, len(jobs.jobs))
 	jobs.mu.Lock()
 	job.mu.Lock()
-	job.EndTime = time.Now().Add(-rc.Opt.JobExpireDuration - 60*time.Second)
+	job.EndTime = time.Now().Add(-time.Duration(rc.Opt.JobExpireDuration) - 60*time.Second)
 	assert.Equal(t, true, jobs.expireRunning)
 	job.mu.Unlock()
 	jobs.mu.Unlock()
@@ -75,12 +75,12 @@ var OptionsInfo = fs.Options{{
 	Groups: "RC,Metrics",
 }, {
 	Name: "rc_job_expire_duration",
-	Default: 60 * time.Second,
+	Default: fs.Duration(60 * time.Second),
 	Help: "Expire finished async jobs older than this value",
 	Groups: "RC",
 }, {
 	Name: "rc_job_expire_interval",
-	Default: 10 * time.Second,
+	Default: fs.Duration(10 * time.Second),
 	Help: "Interval to check for expired async jobs",
 	Groups: "RC",
 }, {
@@ -120,8 +120,8 @@ type Options struct {
 	MetricsHTTP libhttp.Config `config:"metrics"`
 	MetricsAuth libhttp.AuthConfig `config:"metrics"`
 	MetricsTemplate libhttp.TemplateConfig `config:"metrics"`
-	JobExpireDuration time.Duration `config:"rc_job_expire_duration"`
-	JobExpireInterval time.Duration `config:"rc_job_expire_interval"`
+	JobExpireDuration fs.Duration `config:"rc_job_expire_duration"`
+	JobExpireInterval fs.Duration `config:"rc_job_expire_interval"`
 }
 
 // Opt is the default values used for Options
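Note that the `Default` values in `OptionsInfo` change type together with the struct fields they bind to. The sketch below illustrates why with a toy registry that, like a reflection-based options binder, requires the default's dynamic type to match the target field exactly; it is a simplified assumption, not rclone's real `fs.Options` machinery:

```go
package main

import (
	"fmt"
	"reflect"
	"time"
)

// Duration stands in for fs.Duration.
type Duration time.Duration

// Option is a toy version of an options-registry entry.
type Option struct {
	Name    string
	Default any
}

// bind copies the default into the field, insisting on an exact type match.
func bind(opt Option, field any) error {
	fv := reflect.ValueOf(field).Elem()
	dv := reflect.ValueOf(opt.Default)
	if fv.Type() != dv.Type() {
		return fmt.Errorf("option %q: default is %s but field is %s", opt.Name, dv.Type(), fv.Type())
	}
	fv.Set(dv)
	return nil
}

func main() {
	var jobExpireDuration Duration
	// A time.Duration default no longer matches the Duration field...
	bad := Option{Name: "rc_job_expire_duration", Default: 60 * time.Second}
	fmt.Println(bind(bad, &jobExpireDuration))
	// ...so the defaults are wrapped along with the fields.
	good := Option{Name: "rc_job_expire_duration", Default: Duration(60 * time.Second)}
	fmt.Println(bind(good, &jobExpireDuration), time.Duration(jobExpireDuration))
}
```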
@@ -194,7 +194,7 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
 		return nil, err
 	}
 	if ci.MaxDuration > 0 {
-		s.maxDurationEndTime = time.Now().Add(ci.MaxDuration)
+		s.maxDurationEndTime = time.Now().Add(time.Duration(ci.MaxDuration))
 		fs.Infof(s.fdst, "Transfer session %v deadline: %s", ci.CutoffMode, s.maxDurationEndTime.Format("2006/01/02 15:04:05"))
 	}
 	// If a max session duration has been defined add a deadline
@@ -1467,7 +1467,7 @@ func TestSyncWithUpdateOlder(t *testing.T) {
 	r.CheckRemoteItems(t, oneO, twoO, threeO, fourO)
 
 	ci.UpdateOlder = true
-	ci.ModifyWindow = fs.ModTimeNotSupported
+	ci.ModifyWindow = fs.Duration(fs.ModTimeNotSupported)
 
 	ctx = predictDstFromLogger(ctx)
 	err := Sync(ctx, r.Fremote, r.Flocal, false)
@@ -1497,7 +1497,7 @@ func testSyncWithMaxDuration(t *testing.T, cutoffMode fs.CutoffMode) {
 	}
 	r := fstest.NewRun(t)
 
-	maxDuration := 250 * time.Millisecond
+	maxDuration := fs.Duration(250 * time.Millisecond)
 	ci.MaxDuration = maxDuration
 	ci.CutoffMode = cutoffMode
 	ci.CheckFirst = true
@@ -1539,7 +1539,7 @@ func testSyncWithMaxDuration(t *testing.T, cutoffMode fs.CutoffMode) {
 	const maxTransferTime = 20 * time.Second
 
 	what := fmt.Sprintf("expecting elapsed time %v between %v and %v", elapsed, maxDuration, maxTransferTime)
-	assert.True(t, elapsed >= maxDuration, what)
+	assert.True(t, elapsed >= time.Duration(maxDuration), what)
 	assert.True(t, elapsed < maxTransferTime, what)
 }
@@ -67,13 +67,13 @@ backends:
 #   maxfile: 10k
 #   ignore:
 #     - TestApplyTransforms
-- backend: "chunker"
-  remote: "TestChunkerChunk50bYandex:"
-  fastlist: true
-  maxfile: 1k
-  ignore:
-    # Needs investigation
-    - TestDeduplicateNewestByHash
+# - backend: "chunker"
+#   remote: "TestChunkerChunk50bYandex:"
+#   fastlist: true
+#   maxfile: 1k
+#   ignore:
+#     # Needs investigation
+#     - TestDeduplicateNewestByHash
 # - backend: "chunker"
 #   remote: "TestChunkerChunk50bBox:"
 #   fastlist: true
@@ -602,6 +602,13 @@ backends:
         - KRB5CCNAME=/tmp/rclone_krb5/ccache
       ignoretests:
         - cmd/gitannex
+  - backend: "smb"
+    remote: "TestSMBKerberosCcache:rclone"
+    fastlist: false
+    env:
+      - KRB5_CONFIG=/tmp/rclone_krb5_ccache/krb5.conf
+    ignoretests:
+      - cmd/gitannex
   - backend: "storj"
     remote: "TestStorj:"
     fastlist: true
@@ -30,6 +30,8 @@ They should be bound to localhost so they are not accessible externally.
 | 28634 | TestSMBKerberos |
 | 28635 | TestS3Exaba |
 | 28636 | TestS3Exaba |
+| 28637 | TestSMBKerberosCcache |
+| 28638 | TestSMBKerberosCcache |
 | 38081 | TestWebdavOwncloud |
 
 ## Non localhost tests
@@ -37,6 +37,8 @@ RUN rm -rf /etc/samba/smb.conf /var/lib/samba \
     && samba-tool user setexpiry $USER --noexpiry \
     && mkdir -m 777 /share /rclone \
     && cat <<EOS >> /etc/samba/smb.conf
+[global]
+    server signing = auto
 [public]
     path = /share
     browseable = yes
fstest/testserver/init.d/TestSMBKerberosCcache: 85 lines (new executable file)
@@ -0,0 +1,85 @@
+#!/usr/bin/env bash
+
+set -e
+
+# Set default location for Kerberos config and ccache. Can be overridden by the caller
+# using environment variables RCLONE_TEST_CUSTOM_CCACHE_LOCATION and KRB5_CONFIG.
+export TEMP_DIR=/tmp/rclone_krb5_ccache
+mkdir -p "${TEMP_DIR}"
+export KRB5_CONFIG=${KRB5_CONFIG:-${TEMP_DIR}/krb5.conf}
+export RCLONE_TEST_CUSTOM_CCACHE_LOCATION=${RCLONE_TEST_CUSTOM_CCACHE_LOCATION:-${TEMP_DIR}/ccache}
+
+IMAGE=rclone/test-smb-kerberos-ccache
+NAME=smb-kerberos-ccache
+USER=rclone
+DOMAIN=RCLONE
+REALM=RCLONE.LOCAL
+SMB_PORT=28637
+KRB5_PORT=28638
+
+. $(dirname "$0")/docker.bash
+
+start() {
+    docker build -t ${IMAGE} --load - <<EOF
+FROM alpine:3.21
+RUN apk add --no-cache samba-dc
+RUN rm -rf /etc/samba/smb.conf /var/lib/samba \
+    && mkdir -p /var/lib/samba/private \
+    && samba-tool domain provision \
+        --use-rfc2307 \
+        --option acl_xattr:security_acl_name=user.NTACL \
+        --realm=$REALM \
+        --domain=$DOMAIN \
+        --server-role=dc \
+        --dns-backend=SAMBA_INTERNAL \
+        --host-name=localhost \
+    && samba-tool user add --random-password $USER \
+    && samba-tool user setexpiry $USER --noexpiry \
+    && mkdir -m 777 /share /rclone \
+    && cat <<EOS >> /etc/samba/smb.conf
+[global]
+    server signing = auto
+[public]
+    path = /share
+    browseable = yes
+    read only = yes
+    guest ok = yes
+[rclone]
+    path = /rclone
+    browseable = yes
+    read only = no
+    guest ok = no
+    valid users = rclone
+EOS
+CMD ["samba", "-i"]
+EOF
+
+    docker run --rm -d --name ${NAME} \
+        -p 127.0.0.1:${SMB_PORT}:445 \
+        -p 127.0.0.1:${SMB_PORT}:445/udp \
+        -p 127.0.0.1:${KRB5_PORT}:88 \
+        ${IMAGE}
+
+    cat > "${KRB5_CONFIG}" <<EOF
+[libdefaults]
+    default_realm = ${REALM}
+[realms]
+    ${REALM} = {
+        kdc = localhost
+    }
+EOF
+
+    docker cp "${KRB5_CONFIG}" ${NAME}:/etc/krb5.conf
+    docker exec ${NAME} samba-tool user get-kerberos-ticket rclone --output-krb5-ccache=/tmp/ccache
+    docker cp ${NAME}:/tmp/ccache "${RCLONE_TEST_CUSTOM_CCACHE_LOCATION}"
+    sed -i -e "s/localhost/localhost:${KRB5_PORT}/" "${KRB5_CONFIG}"
+
+    echo type=smb
+    echo host=localhost
+    echo port=$SMB_PORT
+    echo use_kerberos=true
+    echo kerberos_ccache=${RCLONE_TEST_CUSTOM_CCACHE_LOCATION}
+    echo _connect=127.0.0.1:${SMB_PORT}
+}
+
+. $(dirname "$0")/run.bash
@@ -120,11 +120,11 @@ var ConfigInfo = fs.Options{{
 	Help: "IPaddress:Port or :Port to bind server to",
 }, {
 	Name: "server_read_timeout",
-	Default: 1 * time.Hour,
+	Default: fs.Duration(1 * time.Hour),
 	Help: "Timeout for server reading data",
 }, {
 	Name: "server_write_timeout",
-	Default: 1 * time.Hour,
+	Default: fs.Duration(1 * time.Hour),
 	Help: "Timeout for server writing data",
 }, {
 	Name: "max_header_bytes",
@@ -158,25 +158,25 @@
 
 // Config contains options for the http Server
 type Config struct {
-	ListenAddr []string `config:"addr"` // Port to listen on
-	BaseURL string `config:"baseurl"` // prefix to strip from URLs
-	ServerReadTimeout time.Duration `config:"server_read_timeout"` // Timeout for server reading data
-	ServerWriteTimeout time.Duration `config:"server_write_timeout"` // Timeout for server writing data
-	MaxHeaderBytes int `config:"max_header_bytes"` // Maximum size of request header
-	TLSCert string `config:"cert"` // Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)
-	TLSKey string `config:"key"` // Path to TLS PEM private key file
-	TLSCertBody []byte `config:"-"` // TLS PEM public key certificate body (can also include intermediate/CA certificates), ignores TLSCert
-	TLSKeyBody []byte `config:"-"` // TLS PEM private key body, ignores TLSKey
-	ClientCA string `config:"client_ca"` // Path to TLS PEM CA file with certificate authorities to verify clients with
-	MinTLSVersion string `config:"min_tls_version"` // MinTLSVersion contains the minimum TLS version that is acceptable.
-	AllowOrigin string `config:"allow_origin"` // AllowOrigin sets the Access-Control-Allow-Origin header
+	ListenAddr []string `config:"addr"` // Port to listen on
+	BaseURL string `config:"baseurl"` // prefix to strip from URLs
+	ServerReadTimeout fs.Duration `config:"server_read_timeout"` // Timeout for server reading data
+	ServerWriteTimeout fs.Duration `config:"server_write_timeout"` // Timeout for server writing data
+	MaxHeaderBytes int `config:"max_header_bytes"` // Maximum size of request header
+	TLSCert string `config:"cert"` // Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)
+	TLSKey string `config:"key"` // Path to TLS PEM private key file
+	TLSCertBody []byte `config:"-"` // TLS PEM public key certificate body (can also include intermediate/CA certificates), ignores TLSCert
+	TLSKeyBody []byte `config:"-"` // TLS PEM private key body, ignores TLSKey
+	ClientCA string `config:"client_ca"` // Path to TLS PEM CA file with certificate authorities to verify clients with
+	MinTLSVersion string `config:"min_tls_version"` // MinTLSVersion contains the minimum TLS version that is acceptable.
+	AllowOrigin string `config:"allow_origin"` // AllowOrigin sets the Access-Control-Allow-Origin header
 }
 
 // AddFlagsPrefix adds flags for the httplib
 func (cfg *Config) AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string) {
 	flags.StringArrayVarP(flagSet, &cfg.ListenAddr, prefix+"addr", "", cfg.ListenAddr, "IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to", prefix)
-	flags.DurationVarP(flagSet, &cfg.ServerReadTimeout, prefix+"server-read-timeout", "", cfg.ServerReadTimeout, "Timeout for server reading data", prefix)
-	flags.DurationVarP(flagSet, &cfg.ServerWriteTimeout, prefix+"server-write-timeout", "", cfg.ServerWriteTimeout, "Timeout for server writing data", prefix)
+	flags.FVarP(flagSet, &cfg.ServerReadTimeout, prefix+"server-read-timeout", "", "Timeout for server reading data", prefix)
+	flags.FVarP(flagSet, &cfg.ServerWriteTimeout, prefix+"server-write-timeout", "", "Timeout for server writing data", prefix)
 	flags.IntVarP(flagSet, &cfg.MaxHeaderBytes, prefix+"max-header-bytes", "", cfg.MaxHeaderBytes, "Maximum size of request header", prefix)
 	flags.StringVarP(flagSet, &cfg.TLSCert, prefix+"cert", "", cfg.TLSCert, "Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)", prefix)
 	flags.StringVarP(flagSet, &cfg.TLSKey, prefix+"key", "", cfg.TLSKey, "Path to TLS PEM private key file", prefix)
@@ -198,8 +198,8 @@ func AddHTTPFlagsPrefix(flagSet *pflag.FlagSet, prefix string, cfg *Config) {
 func DefaultCfg() Config {
 	return Config{
 		ListenAddr: []string{"127.0.0.1:8080"},
-		ServerReadTimeout: 1 * time.Hour,
-		ServerWriteTimeout: 1 * time.Hour,
+		ServerReadTimeout: fs.Duration(1 * time.Hour),
+		ServerWriteTimeout: fs.Duration(1 * time.Hour),
 		MaxHeaderBytes: 4096,
 		MinTLSVersion: "tls1.0",
 	}
@@ -273,8 +273,8 @@ func newInstance(ctx context.Context, s *Server, listener net.Listener, tlsCfg *
 		listener: listener,
 		httpServer: &http.Server{
 			Handler: s.mux,
-			ReadTimeout: s.cfg.ServerReadTimeout,
-			WriteTimeout: s.cfg.ServerWriteTimeout,
+			ReadTimeout: time.Duration(s.cfg.ServerReadTimeout),
+			WriteTimeout: time.Duration(s.cfg.ServerWriteTimeout),
 			MaxHeaderBytes: s.cfg.MaxHeaderBytes,
 			ReadHeaderTimeout: 10 * time.Second, // time to send the headers
 			IdleTimeout: 60 * time.Second, // time to keep idle connections open
@@ -79,7 +79,7 @@ func Start(ctx context.Context, facility string, f fs.Fs) (*DB, error) {
 	}
 
 	name := makeName(facility, f)
-	lockTime := fs.GetConfig(ctx).KvLockTime
+	lockTime := time.Duration(fs.GetConfig(ctx).KvLockTime)
 
 	db := &DB{
 		name: name,