mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits


3 Commits

Author SHA1 Message Date
Duncan Smart
886ac7af1d docs: Clarify OAuth scopes for readonly Google Drive access 2025-11-24 15:58:53 +00:00
Diana
3c40238f02 b2: support authentication with new bucket restricted application keys
Backblaze has updated its b2_authorize_account API endpoint: newly created
application keys are now "multi-bucket" keys, capable of being limited to
multiple buckets. These keys can only be used with the v4 endpoint, not v1,
which returns an HTTP 400.

This commit switches authorization to the v4 endpoint, allowing such keys to
work with any of the allowed buckets.

With multi-bucket keys, missing restricted buckets can be non-fatal: they are skipped as long as another allowed bucket matches (see the authorization sketch after the commit list below).

Supports listing root with multi-bucket API keys
2025-11-24 15:46:41 +00:00
Nick Craig-Wood
46ca0dd7fe docs: update sponsor logos 2025-11-24 14:58:33 +00:00
5 changed files with 100 additions and 56 deletions
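
To make the b2 change above concrete, here is a minimal standalone sketch (not rclone code, and not part of any commit here) that authorizes with an application key against the v4 endpoint and prints the buckets the key is restricted to. The API host and the environment variable names are assumptions for illustration; the path and the JSON field names match the diffs below.

```go
// Hedged sketch: authorize against the v4 endpoint and list the buckets a
// multi-bucket application key is restricted to. The host and env var names
// are assumptions; the JSON layout mirrors the new api types in the diff.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
)

// authResponse mirrors only the fields of interest from the v4 response.
type authResponse struct {
	AccountID          string `json:"accountId"`
	AuthorizationToken string `json:"authorizationToken"`
	APIs               struct {
		Storage struct {
			APIURL  string `json:"apiUrl"`
			Allowed struct {
				Buckets []struct {
					ID   string `json:"id"`
					Name string `json:"name"`
				} `json:"buckets"`
				Capabilities []string `json:"capabilities"`
			} `json:"allowed"`
		} `json:"storageApi"`
	} `json:"apiInfo"`
}

func main() {
	keyID := os.Getenv("B2_KEY_ID") // application key ID (assumed env var)
	key := os.Getenv("B2_KEY")      // application key secret (assumed env var)

	req, err := http.NewRequest("GET", "https://api.backblazeb2.com/b2api/v4/b2_authorize_account", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Same credentials rclone sends as UserName/Password in rest.Opts.
	req.SetBasicAuth(keyID, key)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var auth authResponse
	if err := json.NewDecoder(resp.Body).Decode(&auth); err != nil {
		log.Fatal(err)
	}

	// An empty Buckets slice means the key is not bucket-restricted; an empty
	// Name marks a restricted bucket that no longer exists.
	for _, b := range auth.APIs.Storage.Allowed.Buckets {
		fmt.Printf("allowed bucket: id=%s name=%q\n", b.ID, b.Name)
	}
}
```

Calling the old /b2api/v1/b2_authorize_account with one of these multi-bucket keys is what produces the HTTP 400 mentioned in the commit message, which is why rclone now authorizes via v4.
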

View File

@@ -133,23 +133,32 @@ type File struct {
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
// StorageAPI is as returned from the b2_authorize_account call
type StorageAPI struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
Buckets []struct { // When present, access is restricted to one or more buckets.
ID string `json:"id"` // ID of bucket
Name string `json:"name"` // When present, name of bucket - may be empty
} `json:"buckets"`
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has for every bucket.
NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
}
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
APIs struct { // Supported APIs for this account / key. These are API-dependent JSON objects.
Storage StorageAPI `json:"storageApi"`
} `json:"apiInfo"`
}
// ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct {
AccountID string `json:"accountId"` // The identifier for the account.
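
Illustration only (not part of the diff): a hedged sketch of how a v4 response decodes into the new types. The JSON values are invented and the import path of the api package is assumed.

```go
// Hedged sketch: unmarshal a made-up v4 b2_authorize_account response into the
// new AuthorizeAccountResponse / StorageAPI types. Import path is assumed.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/rclone/rclone/backend/b2/api"
)

const sample = `{
  "accountId": "aaaabbbbcccc",
  "authorizationToken": "4_...example...",
  "apiInfo": {
    "storageApi": {
      "apiUrl": "https://api004.backblazeb2.com",
      "downloadUrl": "https://f004.backblazeb2.com",
      "recommendedPartSize": 100000000,
      "absoluteMinimumPartSize": 5000000,
      "allowed": {
        "buckets": [
          {"id": "b1", "name": "first-bucket"},
          {"id": "b2", "name": "second-bucket"}
        ],
        "capabilities": ["listBuckets", "listFiles", "readFiles"]
      }
    }
  }
}`

func main() {
	var auth api.AuthorizeAccountResponse
	if err := json.Unmarshal([]byte(sample), &auth); err != nil {
		log.Fatal(err)
	}
	for _, b := range auth.APIs.Storage.Allowed.Buckets {
		fmt.Println(b.ID, b.Name)
	}
	fmt.Println(auth.APIs.Storage.APIURL)
}
```

An empty `buckets` array means the key is not bucket-restricted; an entry with an empty `name` marks a restricted bucket that no longer exists, which the backend now treats as non-fatal.
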

View File

@@ -607,17 +607,29 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed to authorize account: %w", err)
}
// If this is a key limited to a single bucket, it must exist already
if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
if allowedBucket == "" {
return nil, errors.New("bucket that application key is restricted to no longer exists")
// If this is a key limited to one or more buckets, one of them must exist
// and be ours.
if f.rootBucket != "" && len(f.info.APIs.Storage.Allowed.Buckets) != 0 {
buckets := f.info.APIs.Storage.Allowed.Buckets
var rootFound = false
var rootID string
for _, b := range buckets {
allowedBucket := f.opt.Enc.ToStandardName(b.Name)
if allowedBucket == "" {
fs.Debugf(f, "bucket %q that application key is restricted to no longer exists", b.ID)
continue
}
if allowedBucket == f.rootBucket {
rootFound = true
rootID = b.ID
}
}
if allowedBucket != f.rootBucket {
return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
if !rootFound {
return nil, fmt.Errorf("you must use bucket(s) %q with this application key", buckets)
}
f.cache.MarkOK(f.rootBucket)
f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
f.setBucketID(f.rootBucket, rootID)
}
if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the (bucket,directory) is actually an existing file
@@ -643,7 +655,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
defer f.authMu.Unlock()
opts := rest.Opts{
Method: "GET",
Path: "/b2api/v1/b2_authorize_account",
Path: "/b2api/v4/b2_authorize_account",
RootURL: f.opt.Endpoint,
UserName: f.opt.Account,
Password: f.opt.Key,
@@ -656,13 +668,13 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
if err != nil {
return fmt.Errorf("failed to authenticate: %w", err)
}
f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
f.srv.SetRoot(f.info.APIs.Storage.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
return nil
}
// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
return slices.Contains(f.info.Allowed.Capabilities, permission)
return slices.Contains(f.info.APIs.Storage.Allowed.Capabilities, permission)
}
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
@@ -1067,44 +1079,68 @@ type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
BucketID: f.info.Allowed.BucketID,
}
if bucketName != "" && account.BucketID == "" {
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
responses := make([]api.ListBucketsResponse, len(f.info.APIs.Storage.Allowed.Buckets))[:0]
for i := range f.info.APIs.Storage.Allowed.Buckets {
b := &f.info.APIs.Storage.Allowed.Buckets[i]
// Empty names indicate a bucket that no longer exists, this is non-fatal
// for multi-bucket API keys.
if b.Name == "" {
continue
}
// When requesting a specific bucket skip over non-matching names
if bucketName != "" && b.Name != bucketName {
continue
}
var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
BucketID: b.ID,
}
if bucketName != "" && account.BucketID == "" {
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
}
var response api.ListBucketsResponse
opts := rest.Opts{
Method: "POST",
Path: "/b2_list_buckets",
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
responses = append(responses, response)
}
var response api.ListBucketsResponse
opts := rest.Opts{
Method: "POST",
Path: "/b2_list_buckets",
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
f.bucketIDMutex.Lock()
f.bucketTypeMutex.Lock()
f._bucketID = make(map[string]string, 1)
f._bucketType = make(map[string]string, 1)
for i := range response.Buckets {
bucket := &response.Buckets[i]
bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
f.cache.MarkOK(bucket.Name)
f._bucketID[bucket.Name] = bucket.ID
f._bucketType[bucket.Name] = bucket.Type
for ri := range responses {
response := &responses[ri]
for i := range response.Buckets {
bucket := &response.Buckets[i]
bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
f.cache.MarkOK(bucket.Name)
f._bucketID[bucket.Name] = bucket.ID
f._bucketType[bucket.Name] = bucket.Type
}
}
f.bucketTypeMutex.Unlock()
f.bucketIDMutex.Unlock()
for i := range response.Buckets {
bucket := &response.Buckets[i]
err = fn(bucket)
if err != nil {
return err
for ri := range responses {
response := &responses[ri]
for i := range response.Buckets {
bucket := &response.Buckets[i]
err := fn(bucket)
if err != nil {
return err
}
}
}
return nil
@@ -1606,7 +1642,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
bucket, bucketPath := f.split(remote)
var RootURL string
if f.opt.DownloadURL == "" {
RootURL = f.info.DownloadURL
RootURL = f.info.APIs.Storage.DownloadURL
} else {
RootURL = f.opt.DownloadURL
}
@@ -1957,7 +1993,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
// Use downloadUrl from backblaze if downloadUrl is not set
// otherwise use the custom downloadUrl
if o.fs.opt.DownloadURL == "" {
opts.RootURL = o.fs.info.DownloadURL
opts.RootURL = o.fs.info.APIs.Storage.DownloadURL
} else {
opts.RootURL = o.fs.opt.DownloadURL
}

View File

@@ -283,7 +283,7 @@ It is useful to know how many requests are sent to the server in different scena
All copy commands send the following 4 requests:
```text
/b2api/v1/b2_authorize_account
/b2api/v4/b2_authorize_account
/b2api/v1/b2_create_bucket
/b2api/v1/b2_list_buckets
/b2api/v1/b2_list_file_names

View File

@@ -265,7 +265,7 @@ account key" button.
`https://www.googleapis.com/auth/drive`
to grant read/write access to Google Drive specifically.
You can also use `https://www.googleapis.com/auth/drive.readonly` for read
only access.
only access with `--drive-scope=drive.readonly`.
- Click "Authorise"
##### 3. Configure rclone, assuming a new install

View File

@@ -14,13 +14,12 @@
Platinum Sponsor
</div>
<div class="card-body">
<a id="platinum" href="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general" target="_blank" rel="noopener" title="Visit rclone's sponsor Rabata.io"><img style="width: 100%; height: auto;" src="/img/logos/rabata/txt_1_300x114.png"></a><br />
<a id="platinum" href="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general" target="_blank" rel="noopener" title="Visit rclone's sponsor Rabata.io"><img style="width: 100%; height: auto;" src="/img/logos/rabata/txt_1_website.png"></a><br />
<script>
const imgs = [
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_1_300x114.png" },
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_2_300x114.png" },
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_3_300x114.png" },
{ href: "https://rabata.io/grant-application?utm_source=banner&utm_medium=rclone&utm_content=grant1", img: "/img/logos/rabata/100k_300x114.png" },
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_1_website.png" },
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_2_website.png" },
{ href: "https://rabata.io/grant-application?utm_source=banner&utm_medium=rclone&utm_content=grant1", img: "/img/logos/rabata/100k_website.png" },
];
const img = imgs[Math.floor(Math.random() * imgs.length)];
document.addEventListener("DOMContentLoaded", () => {