Configurable batch lookup size
This is low for now because, under normal operation, you'll be sharing a single identity, which requires only single-digit numbers of lookups at once. Testing and real-world use may eventually warrant an increase.
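For context, here is a minimal sketch of how the new serde default behaves, using a trimmed-down stand-in for ApplicationConfig and serde_json purely for illustration (the real struct has more fields, e.g. installation_id, and the real loader also reads files and AKD_READER-prefixed environment variables):

// Sketch only: ApplicationConfigSketch is a simplified stand-in, not the
// crate's actual ApplicationConfig. Requires serde (derive) and serde_json.
use serde::Deserialize;

fn default_max_batch_lookup_size() -> usize {
    10
}

#[derive(Deserialize, Debug)]
struct ApplicationConfigSketch {
    #[serde(default = "default_max_batch_lookup_size")]
    max_batch_lookup_size: usize,
}

fn main() {
    // Field omitted: serde falls back to the default of 10.
    let implicit: ApplicationConfigSketch = serde_json::from_str("{}").unwrap();
    assert_eq!(implicit.max_batch_lookup_size, 10);

    // Field present: the configured value wins.
    let explicit: ApplicationConfigSketch =
        serde_json::from_str(r#"{ "max_batch_lookup_size": 50 }"#).unwrap();
    assert_eq!(explicit.max_batch_lookup_size, 50);
}

Because the field uses #[serde(default = "...")], existing configurations that never mention max_batch_lookup_size keep working and silently get the conservative default of 10.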
@@ -12,12 +12,19 @@ pub struct ApplicationConfig {
     /// The unique Bitwarden installation ID using this AKD reader instance.
     /// This value is used to namespace AKD data to a given installation.
     pub installation_id: Uuid,
+    /// Maximum number of labels allowed in a single batch lookup request. Defaults to 10.
+    #[serde(default = "default_max_batch_lookup_size")]
+    pub max_batch_lookup_size: usize,
 }
 
 fn default_web_server_bind_address() -> String {
     "127.0.0.1:3001".to_string()
 }
 
+fn default_max_batch_lookup_size() -> usize {
+    10
+}
+
 impl ApplicationConfig {
     /// Load configuration from multiple sources in order of priority:
     /// 1. Environment variables (prefixed with AKD_READER) - always applied with highest priority

@@ -20,6 +20,7 @@ struct AppState {
     directory: ReadOnlyDirectory<BitwardenV1Configuration, AkdDatabase, VrfKeyDatabase>,
     // TODO: use this to allow for unique failures for lookup and key history requests that have pending updates
     // publish_queue: ReadOnlyPublishQueueType,
+    max_batch_lookup_size: usize,
 }
 
 #[instrument(skip_all, name = "reader_start")]

@@ -35,10 +36,12 @@ pub async fn start(
 
     let mut shutdown_rx = shutdown_rx.resubscribe();
 
+    let max_batch_lookup_size = config.max_batch_lookup_size;
     let handle = tokio::spawn(async move {
         let app_state = AppState {
             directory: directory,
             // publish_queue: publish_queue,
+            max_batch_lookup_size,
         };
 
         let app = Router::new()

@@ -22,7 +22,11 @@ pub struct BatchLookupData {
 
 #[instrument(skip_all)]
 pub async fn batch_lookup_handler(
-    State(AppState { directory, .. }): State<AppState>,
+    State(AppState {
+        directory,
+        max_batch_lookup_size,
+        ..
+    }): State<AppState>,
     Json(BatchLookupRequest { labels_b64 }): Json<BatchLookupRequest>,
 ) -> (StatusCode, Json<Response<BatchLookupData>>) {
     info!("Handling batch lookup request");

@@ -37,18 +41,16 @@ pub async fn batch_lookup_handler(
     }
 
     // Validate batch size
-    // TODO: make this configurable
-    const MAX_BATCH_SIZE: usize = 1000;
-    if labels_b64.len() > MAX_BATCH_SIZE {
+    if labels_b64.len() > max_batch_lookup_size {
         error!(
             batch_size = labels_b64.len(),
-            max_size = MAX_BATCH_SIZE,
+            max_size = max_batch_lookup_size,
             "Batch size exceeds limit"
         );
         return (
             StatusCode::BAD_REQUEST,
             Json(Response::error(ReaderError::BatchTooLarge {
-                limit: MAX_BATCH_SIZE,
+                limit: max_batch_lookup_size,
             })),
         );
     }
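For reference, the guard added to batch_lookup_handler reduces to a length check against the configured limit. The sketch below restates it as a free function with a local stand-in for ReaderError::BatchTooLarge (the real error type lives elsewhere in the crate); the actual handler maps this condition to a 400 BAD_REQUEST via Response::error.

// Illustrative restatement of the batch-size guard from batch_lookup_handler.
// BatchSizeError is a local stand-in for ReaderError::BatchTooLarge.
#[derive(Debug, PartialEq)]
struct BatchSizeError {
    limit: usize,
}

fn check_batch_size(labels_b64: &[String], max_batch_lookup_size: usize) -> Result<(), BatchSizeError> {
    if labels_b64.len() > max_batch_lookup_size {
        return Err(BatchSizeError {
            limit: max_batch_lookup_size,
        });
    }
    Ok(())
}

fn main() {
    let labels: Vec<String> = (0..11).map(|i| format!("label-{i}")).collect();
    // With the default limit of 10, an 11-label request is rejected.
    assert_eq!(check_batch_size(&labels, 10), Err(BatchSizeError { limit: 10 }));
    // At or below the limit, the request proceeds to the lookups.
    assert!(check_batch_size(&labels[..10], 10).is_ok());
}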