
Improve error handling

Matt Gibson
2026-01-20 16:09:16 -08:00
parent 714d2c053a
commit 5c87afdb55
12 changed files with 327 additions and 61 deletions

akd/Cargo.lock generated

@@ -2264,6 +2264,7 @@ dependencies = [
"common",
"config",
"serde",
"thiserror 2.0.17",
"tokio",
"tracing",
"tracing-subscriber",


@@ -248,4 +248,18 @@ mod tests {
assert_eq!(hasher1.finish(), hasher2.finish());
}
#[test]
fn test_b64_from_json() {
let json_data = "\"SGVsbG8sIFdvcmxkIQ==\"";
let b64: B64 = serde_json::from_str(json_data).unwrap();
assert_eq!(b64.as_bytes(), b"Hello, World!");
}
#[test]
fn test_not_b64_from_json_fails() {
let json_data = "\"InvalidBase64@@@\"";
let result: Result<B64, _> = serde_json::from_str(json_data);
assert!(result.is_err());
}
}


@@ -21,6 +21,7 @@ uuid = { workspace = true }
axum = { workspace = true }
chrono = { workspace = true }
bitwarden-encoding = { workspace = true }
thiserror = { workspace = true }
[lints]
workspace = true


@@ -0,0 +1,223 @@
use akd::errors::AkdError;
use axum::http::StatusCode;
use serde::{Deserialize, Serialize};
use thiserror::Error;
/// Main error type for the Reader API
///
/// Note: Base64 validation for input fields happens automatically during JSON
/// deserialization via the `bitwarden_encoding::B64` type, so invalid base64
/// will be rejected before reaching the handlers with a 400 Bad Request error.
#[derive(Error, Debug)]
pub enum ReaderError {
// Application-level validation errors (4xx)
#[error("Invalid epoch range: start_epoch ({start_epoch}) must be <= end_epoch ({end_epoch})")]
InvalidEpochRange { start_epoch: u64, end_epoch: u64 },
#[error("Empty batch request")]
EmptyBatch,
#[error("Batch size limit exceeded: {limit}")]
BatchTooLarge { limit: usize },
// AKD library errors
#[error("AKD error: {0}")]
Akd(#[from] AkdError),
}
/// Error response structure sent to clients
#[derive(Debug, Serialize, Deserialize)]
pub struct ErrorResponse {
/// Machine-readable error code for client-side matching
pub code: ErrorCode,
/// Human-readable error message
pub message: String,
}
/// Machine-readable error codes for reliable client-side parsing
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum ErrorCode {
// Application-level validation errors (400-level)
InvalidEpochRange,
EmptyBatch,
BatchTooLarge,
// AKD-specific errors
AkdTreeNode,
AkdVerification,
AkdInvalidEpoch,
AkdReadOnlyDirectory,
AkdPublish,
AkdAzks,
AkdVrf,
AkdStorageNotFound,
AkdStorageTransaction,
AkdStorageConnection,
AkdStorageOther,
AkdAudit,
AkdParallelism,
// Server errors (5xx)
InternalError,
}
impl ReaderError {
/// Map error to appropriate HTTP status code
pub fn status_code(&self) -> StatusCode {
match self {
// 400-level errors
ReaderError::InvalidEpochRange { .. } => StatusCode::BAD_REQUEST,
ReaderError::EmptyBatch => StatusCode::BAD_REQUEST,
ReaderError::BatchTooLarge { .. } => StatusCode::BAD_REQUEST,
// AKD errors - nuanced mapping
ReaderError::Akd(akd_err) => match akd_err {
AkdError::Storage(storage_err) => match storage_err {
akd::errors::StorageError::NotFound(_) => StatusCode::NOT_FOUND,
akd::errors::StorageError::Transaction(_) => StatusCode::INTERNAL_SERVER_ERROR,
akd::errors::StorageError::Connection(_) => StatusCode::SERVICE_UNAVAILABLE,
akd::errors::StorageError::Other(_) => StatusCode::INTERNAL_SERVER_ERROR,
},
AkdError::Directory(dir_err) => match dir_err {
akd::errors::DirectoryError::InvalidEpoch(_) => StatusCode::BAD_REQUEST,
akd::errors::DirectoryError::Verification(_) => {
StatusCode::UNPROCESSABLE_ENTITY
}
akd::errors::DirectoryError::ReadOnlyDirectory(_) => StatusCode::FORBIDDEN,
akd::errors::DirectoryError::Publish(_) => StatusCode::INTERNAL_SERVER_ERROR,
},
AkdError::TreeNode(_) => StatusCode::INTERNAL_SERVER_ERROR,
AkdError::AzksErr(_) => StatusCode::INTERNAL_SERVER_ERROR,
AkdError::Vrf(_) => StatusCode::INTERNAL_SERVER_ERROR,
AkdError::AuditErr(_) => StatusCode::INTERNAL_SERVER_ERROR,
AkdError::Parallelism(_) => StatusCode::INTERNAL_SERVER_ERROR,
AkdError::TestErr(_) => StatusCode::INTERNAL_SERVER_ERROR,
},
}
}
/// Convert error to error code for client parsing
pub fn error_code(&self) -> ErrorCode {
match self {
ReaderError::InvalidEpochRange { .. } => ErrorCode::InvalidEpochRange,
ReaderError::EmptyBatch => ErrorCode::EmptyBatch,
ReaderError::BatchTooLarge { .. } => ErrorCode::BatchTooLarge,
ReaderError::Akd(akd_err) => Self::akd_error_code(akd_err),
}
}
/// Map AkdError to specific error code
fn akd_error_code(err: &AkdError) -> ErrorCode {
match err {
AkdError::TreeNode(_) => ErrorCode::AkdTreeNode,
AkdError::Directory(dir_err) => match dir_err {
akd::errors::DirectoryError::Verification(_) => ErrorCode::AkdVerification,
akd::errors::DirectoryError::InvalidEpoch(_) => ErrorCode::AkdInvalidEpoch,
akd::errors::DirectoryError::ReadOnlyDirectory(_) => {
ErrorCode::AkdReadOnlyDirectory
}
akd::errors::DirectoryError::Publish(_) => ErrorCode::AkdPublish,
},
AkdError::AzksErr(_) => ErrorCode::AkdAzks,
AkdError::Vrf(_) => ErrorCode::AkdVrf,
AkdError::Storage(storage_err) => match storage_err {
akd::errors::StorageError::NotFound(_) => ErrorCode::AkdStorageNotFound,
akd::errors::StorageError::Transaction(_) => ErrorCode::AkdStorageTransaction,
akd::errors::StorageError::Connection(_) => ErrorCode::AkdStorageConnection,
akd::errors::StorageError::Other(_) => ErrorCode::AkdStorageOther,
},
AkdError::AuditErr(_) => ErrorCode::AkdAudit,
AkdError::Parallelism(_) => ErrorCode::AkdParallelism,
AkdError::TestErr(_) => ErrorCode::InternalError,
}
}
/// Convert error to ErrorResponse
pub fn to_error_response(&self) -> ErrorResponse {
ErrorResponse {
code: self.error_code(),
message: self.to_string(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_akd_not_found_maps_to_404() {
let storage_err = akd::errors::StorageError::NotFound("test".to_string());
let akd_err = AkdError::Storage(storage_err);
let reader_err = ReaderError::from(akd_err);
assert_eq!(reader_err.status_code(), StatusCode::NOT_FOUND);
assert!(matches!(
reader_err.error_code(),
ErrorCode::AkdStorageNotFound
));
}
#[test]
fn test_empty_batch_maps_to_400() {
let err = ReaderError::EmptyBatch;
assert_eq!(err.status_code(), StatusCode::BAD_REQUEST);
assert!(matches!(err.error_code(), ErrorCode::EmptyBatch));
}
#[test]
fn test_batch_too_large_maps_to_400() {
let err = ReaderError::BatchTooLarge { limit: 1000 };
assert_eq!(err.status_code(), StatusCode::BAD_REQUEST);
assert!(matches!(err.error_code(), ErrorCode::BatchTooLarge));
}
#[test]
fn test_invalid_epoch_range_maps_to_400() {
let err = ReaderError::InvalidEpochRange {
start_epoch: 0,
end_epoch: 0,
};
assert_eq!(err.status_code(), StatusCode::BAD_REQUEST);
assert!(matches!(err.error_code(), ErrorCode::InvalidEpochRange));
}
#[test]
fn test_error_response_serialization() {
let err = ReaderError::InvalidEpochRange {
start_epoch: 0,
end_epoch: 0,
};
let response = err.to_error_response();
assert!(matches!(response.code, ErrorCode::InvalidEpochRange));
assert!(response.message.contains("Invalid epoch range"));
}
#[test]
fn test_akd_invalid_epoch_error_maps_to_400() {
let dir_err = akd::errors::DirectoryError::InvalidEpoch("invalid epoch".to_string());
let akd_err = AkdError::Directory(dir_err);
let reader_err = ReaderError::from(akd_err);
assert_eq!(reader_err.status_code(), StatusCode::BAD_REQUEST);
assert!(matches!(
reader_err.error_code(),
ErrorCode::AkdInvalidEpoch
));
}
#[test]
fn test_akd_connection_error_maps_to_503() {
let storage_err = akd::errors::StorageError::Connection("connection failed".to_string());
let akd_err = AkdError::Storage(storage_err);
let reader_err = ReaderError::from(akd_err);
assert_eq!(reader_err.status_code(), StatusCode::SERVICE_UNAVAILABLE);
assert!(matches!(
reader_err.error_code(),
ErrorCode::AkdStorageConnection
));
}
}
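
A possible follow-up, not part of this commit: since every handler now repeats the same status_code() / to_error_response() pairing, the mapping could be wired into axum directly by implementing IntoResponse for ReaderError. A minimal sketch, assuming only the error.rs definitions above:

use axum::{response::IntoResponse, Json};

// Sketch only: with this impl, handlers could return
// Result<Json<T>, ReaderError> and let axum build the error reply
// from status_code() and to_error_response() automatically.
impl IntoResponse for ReaderError {
    fn into_response(self) -> axum::response::Response {
        (self.status_code(), Json(self.to_error_response())).into_response()
    }
}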


@@ -7,9 +7,11 @@ use tokio::{net::TcpListener, sync::broadcast::Receiver};
use tracing::{info, instrument};
mod config;
pub mod error;
mod routes;
pub use crate::config::ApplicationConfig;
pub use error::{ErrorCode, ErrorResponse, ReaderError};
pub use routes::response_types;
#[derive(Clone)]


@@ -2,7 +2,7 @@ use axum::{extract::State, http::StatusCode, Json};
use serde::{Deserialize, Serialize};
use tracing::{error, info, instrument};
use crate::{routes::Response, AppState};
use crate::{error::ReaderError, routes::Response, AppState};
#[derive(Debug, Serialize, Deserialize)]
pub struct AuditRequest {
@@ -24,13 +24,31 @@ pub async fn audit_handler(
start_epoch,
end_epoch, "Handling epoch audit request request"
);
// Validate epoch range
if start_epoch > end_epoch {
error!(
start_epoch,
end_epoch, "Invalid epoch range: start_epoch must be <= end_epoch"
);
return (
StatusCode::BAD_REQUEST,
Json(Response::error(ReaderError::InvalidEpochRange {
start_epoch,
end_epoch,
})),
);
}
let audit_proof = directory.audit(start_epoch, end_epoch).await;
match audit_proof {
Ok(audit_proof) => (StatusCode::OK, Json(Response::success(audit_proof))),
Err(e) => {
error!(err = ?e, "Failed to perform epoch audit");
(StatusCode::INTERNAL_SERVER_ERROR, Json(Response::fail(e)))
let reader_error = ReaderError::Akd(e);
let status = reader_error.status_code();
error!(err = ?reader_error, status = %status, "Failed to perform epoch audit");
(status, Json(Response::error(reader_error)))
}
}
}
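
For reference, a sketch (not in the diff) of the JSON body this new validation branch produces, assuming the ErrorResponse and SCREAMING_SNAKE_CASE ErrorCode from error.rs above and serde_json available in dev-dependencies:

#[test]
fn invalid_epoch_range_body_shape() {
    let err = ReaderError::InvalidEpochRange { start_epoch: 5, end_epoch: 3 };
    let body = serde_json::to_string(&err.to_error_response()).unwrap();
    // The code field serializes to the SCREAMING_SNAKE_CASE variant name...
    assert!(body.contains("\"code\":\"INVALID_EPOCH_RANGE\""));
    // ...and the message comes from the thiserror Display implementation.
    assert!(body.contains("start_epoch (5) must be <= end_epoch (3)"));
}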


@@ -3,6 +3,7 @@ use serde::{Deserialize, Serialize};
use tracing::{error, info, instrument};
use crate::{
error::ReaderError,
routes::{get_epoch_hash::EpochData, lookup::AkdLabelB64, Response},
AppState,
};
@@ -25,6 +26,33 @@ pub async fn batch_lookup_handler(
Json(BatchLookupRequest { labels_b64 }): Json<BatchLookupRequest>,
) -> (StatusCode, Json<Response<BatchLookupData>>) {
info!("Handling batch lookup request");
// Validate batch not empty
if labels_b64.is_empty() {
error!("Empty batch request received");
return (
StatusCode::BAD_REQUEST,
Json(Response::error(ReaderError::EmptyBatch)),
);
}
// Validate batch size
// TODO: make this configurable
const MAX_BATCH_SIZE: usize = 1000;
if labels_b64.len() > MAX_BATCH_SIZE {
error!(
batch_size = labels_b64.len(),
max_size = MAX_BATCH_SIZE,
"Batch size exceeds limit"
);
return (
StatusCode::BAD_REQUEST,
Json(Response::error(ReaderError::BatchTooLarge {
limit: MAX_BATCH_SIZE,
})),
);
}
let labels = labels_b64
.into_iter()
.map(|label_b64| label_b64.into())
@@ -40,8 +68,10 @@ pub async fn batch_lookup_handler(
})),
),
Err(e) => {
error!(err = ?e, "Failed to perform batch lookup");
(StatusCode::INTERNAL_SERVER_ERROR, Json(Response::fail(e)))
let reader_error = ReaderError::Akd(e);
let status = reader_error.status_code();
error!(err = ?reader_error, status = %status, "Failed to perform batch lookup");
(status, Json(Response::error(reader_error)))
}
}
}
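
The MAX_BATCH_SIZE constant above is marked with a TODO to make it configurable. One shape that could take, sketched here with a hypothetical max_batch_size setting that does not exist in ApplicationConfig today:

// Hypothetical sketch for the TODO above; the field name and the
// fallback value are assumptions, not part of this commit.
#[derive(Debug, Clone)]
pub struct BatchLimits {
    pub max_batch_size: Option<usize>,
}

impl BatchLimits {
    // Keep the current hard-coded limit as the default when unset.
    pub fn effective_max(&self) -> usize {
        self.max_batch_size.unwrap_or(1000)
    }
}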


@@ -3,7 +3,7 @@ use axum::{extract::State, http::StatusCode, Json};
use serde::{Deserialize, Serialize};
use tracing::{error, info, instrument};
use crate::{routes::Response, AppState};
use crate::{error::ReaderError, routes::Response, AppState};
#[derive(Debug, Serialize, Deserialize)]
pub struct EpochData {
@@ -33,8 +33,10 @@ pub async fn get_epoch_hash_handler(
Json(Response::<EpochData>::success(epoch_hash.into())),
),
Err(e) => {
error!(err = ?e, "Failed to get current AKD epoch hash");
(StatusCode::INTERNAL_SERVER_ERROR, Json(Response::fail(e)))
let reader_error = ReaderError::Akd(e);
let status = reader_error.status_code();
error!(err = ?reader_error, status = %status, "Failed to get current AKD epoch hash");
(status, Json(Response::error(reader_error)))
}
}
}


@@ -1,7 +1,7 @@
use axum::{extract::State, http::StatusCode, Json};
use tracing::{error, info, instrument};
use crate::{routes::Response, AppState};
use crate::{error::ReaderError, routes::Response, AppState};
/// Public key encoded as a base64 string
pub type PublicKeyData = bitwarden_encoding::B64;
@@ -19,8 +19,10 @@ pub async fn get_public_key_handler(
Json(Response::success(public_key.as_ref().into())),
),
Err(e) => {
error!(err = ?e, "Failed to get AKD public key");
(StatusCode::INTERNAL_SERVER_ERROR, Json(Response::fail(e)))
let reader_error = ReaderError::Akd(e);
let status = reader_error.status_code();
error!(err = ?reader_error, status = %status, "Failed to get AKD public key");
(status, Json(Response::error(reader_error)))
}
}
}


@@ -3,6 +3,7 @@ use serde::{Deserialize, Serialize};
use tracing::{error, info, instrument};
use crate::{
error::ReaderError,
routes::{get_epoch_hash::EpochData, Response},
AppState,
};
@@ -58,13 +59,15 @@ pub async fn key_history_handler(
Ok((history_proof, epoch_hash)) => (
StatusCode::OK,
Json(Response::success(HistoryData {
history_proof: history_proof,
history_proof,
epoch_data: epoch_hash.into(),
})),
),
Err(e) => {
error!(err = ?e, "Failed to get key history");
(StatusCode::INTERNAL_SERVER_ERROR, Json(Response::fail(e)))
let reader_error = ReaderError::Akd(e);
let status = reader_error.status_code();
error!(err = ?reader_error, status = %status, "Failed to get key history");
(status, Json(Response::error(reader_error)))
}
}
}


@@ -3,12 +3,13 @@ use serde::{Deserialize, Serialize};
use tracing::{error, info, instrument};
use crate::{
error::ReaderError,
routes::{get_epoch_hash::EpochData, Response},
AppState,
};
#[derive(Debug, Serialize, Deserialize)]
pub struct AkdLabelB64(bitwarden_encoding::B64);
pub struct AkdLabelB64(pub(crate) bitwarden_encoding::B64);
impl From<AkdLabelB64> for akd::AkdLabel {
fn from(label_b64: AkdLabelB64) -> Self {
@@ -45,8 +46,10 @@ pub async fn lookup_handler(
})),
),
Err(e) => {
error!(err = ?e, "Failed to perform lookup");
(StatusCode::INTERNAL_SERVER_ERROR, Json(Response::fail(e)))
let reader_error = ReaderError::Akd(e);
let status = reader_error.status_code();
error!(err = ?reader_error, status = %status, "Failed to perform lookup");
(status, Json(Response::error(reader_error)))
}
}
}


@@ -1,7 +1,8 @@
use akd::errors::AkdError;
use axum::routing::get;
use serde::{Deserialize, Serialize};
use crate::error::{ErrorResponse, ReaderError};
mod audit;
mod batch_lookup;
mod get_epoch_hash;
@@ -26,49 +27,15 @@ pub fn api_routes() -> axum::Router<AppState> {
#[derive(Debug, Serialize, Deserialize)]
pub struct Response<T> {
success: bool,
data: Option<T>,
error: Option<ResponseError>,
pub success: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub data: Option<T>,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<ErrorResponse>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ResponseError {
pub akd_error_type: String,
pub message: String,
}
impl From<AkdError> for ResponseError {
fn from(err: AkdError) -> Self {
ResponseError {
akd_error_type: match &err {
AkdError::TreeNode(_) => "TreeNode".to_string(),
AkdError::Directory(err) => match err {
akd::errors::DirectoryError::Verification(_) => "VerificationError".to_string(),
akd::errors::DirectoryError::InvalidEpoch(_) => "InvalidEpoch".to_string(),
akd::errors::DirectoryError::ReadOnlyDirectory(_) => {
"ReadOnlyDirectory".to_string()
}
akd::errors::DirectoryError::Publish(_) => "Publish".to_string(),
},
AkdError::AzksErr(_) => "AzksErr".to_string(),
AkdError::Vrf(_) => "Vrf".to_string(),
AkdError::Storage(err) => match err {
akd::errors::StorageError::NotFound(_) => "NotFound".to_string(),
akd::errors::StorageError::Transaction(_) => "Transaction".to_string(),
akd::errors::StorageError::Connection(_) => "Connection".to_string(),
akd::errors::StorageError::Other(_) => "Other".to_string(),
},
AkdError::AuditErr(_) => "AuditErr".to_string(),
AkdError::Parallelism(_) => "Parallelism".to_string(),
AkdError::TestErr(_) => "TestErr".to_string(),
},
message: err.to_string(),
}
}
}
impl<'a, T: Serialize + Deserialize<'a>> Response<T> {
fn success(data: T) -> Self {
impl<T: Serialize> Response<T> {
pub fn success(data: T) -> Self {
Self {
success: true,
data: Some(data),
@@ -76,11 +43,11 @@ impl<'a, T: Serialize + Deserialize<'a>> Response<T> {
}
}
fn fail(err: AkdError) -> Self {
pub fn error(err: ReaderError) -> Self {
Self {
success: false,
data: None,
error: Some(err.into()),
error: Some(err.to_error_response()),
}
}
}
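
A small sketch (not part of the diff) of what the skip_serializing_if attributes change on the wire: a successful response now omits the error key entirely instead of emitting "error": null. Assuming serde_json as a dev-dependency, a test next to the Response definition could pin this down:

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn success_response_omits_error_field() {
        // With skip_serializing_if, None fields are dropped from the JSON
        // output rather than serialized as null.
        let response = Response::success(42u64);
        let json = serde_json::to_string(&response).unwrap();
        assert_eq!(json, r#"{"success":true,"data":42}"#);
    }
}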