cargo clippy --fix
akd/Cargo.lock (generated)
@@ -555,6 +555,7 @@ dependencies = [
 "akd_storage",
 "async-trait",
 "config",
 "hex",
 "serde",
 "thiserror 2.0.17",
 "tracing",
@@ -20,7 +20,7 @@ impl DbConfig {
             pool_size,
         } => {
             let db = crate::ms_sql::MsSql::builder(connection_string.clone())
-                .pool_size(pool_size.clone())
+                .pool_size(*pool_size)
                 .build()
                 .await?;
             DatabaseType::MsSql(db)
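The `pool_size` change above is Clippy's `clone_on_copy` fix: the match binding holds a reference to a `Copy` value, so a plain dereference is cheaper and clearer than cloning. A minimal sketch, with `PoolConfig` and `u32` as hypothetical stand-ins for the actual types in this diff:

```rust
// Minimal sketch of Clippy's `clone_on_copy` fix; the names here are
// illustrative, not the real types from this commit.
struct PoolConfig {
    pool_size: u32, // u32 is Copy
}

fn read_size(cfg: &PoolConfig) -> u32 {
    let pool_size = &cfg.pool_size;
    // Before: pool_size.clone() -- compiles, but Clippy flags cloning a Copy type.
    // After: a plain dereference copies the value out.
    *pool_size
}

fn main() {
    assert_eq!(read_size(&PoolConfig { pool_size: 8 }), 8);
}
```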
@@ -75,7 +75,7 @@ impl MsSql {
             .await
             .map_err(|e| {
                 error!(error = %e, "Failed to create DB pool");
-                StorageError::Connection(format!("Failed to create DB pool: {}", e))
+                StorageError::Connection(format!("Failed to create DB pool: {e}"))
             })?;

         info!("Successfully created MS SQL storage connection pool");
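Most hunks in this commit are the same mechanical rewrite: Clippy's `uninlined_format_args` lint, which captures identifiers directly in the format string (stable since Rust 1.58). A minimal before/after sketch, with a hypothetical error value:

```rust
fn main() {
    let e = "connection refused"; // hypothetical error text
    // Before: positional argument.
    let old = format!("Failed to create DB pool: {}", e);
    // After: inlined identifier, as `cargo clippy --fix` rewrites it.
    let new = format!("Failed to create DB pool: {e}");
    assert_eq!(old, new);
}
```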
@@ -89,14 +89,14 @@ impl MsSql {
         info!("Running database migrations");
         let mut conn = self.pool.get().await.map_err(|e| {
             error!(error = %e, "Failed to get DB connection for migrations");
-            StorageError::Connection(format!("Failed to get DB connection for migrations: {}", e))
+            StorageError::Connection(format!("Failed to get DB connection for migrations: {e}"))
         })?;

         ms_database::run_pending_migrations(&mut conn, MIGRATIONS)
             .await
             .map_err(|e| {
                 error!(error = %e, "Failed to run migrations");
-                StorageError::Connection(format!("Failed to run migrations: {}", e))
+                StorageError::Connection(format!("Failed to run migrations: {e}"))
             })?;
         info!("Successfully completed database migrations");
         Ok(())
@@ -107,8 +107,7 @@ impl MsSql {
         let mut conn = self.pool.get().await.map_err(|e| {
             error!(error = %e, "Failed to get DB connection for dropping tables");
             StorageError::Connection(format!(
-                "Failed to get DB connection for dropping tables: {}",
-                e
+                "Failed to get DB connection for dropping tables: {e}"
             ))
         })?;

@@ -122,7 +121,7 @@ impl MsSql {

         conn.simple_query(&drop_all).await.map_err(|e| {
             error!(error = ?e, sql = drop_all, "Failed to execute drop for all tables");
-            StorageError::Other(format!("Failed to drop AKD tables: {}", e))
+            StorageError::Other(format!("Failed to drop AKD tables: {e}"))
         })?;

         info!("Successfully dropped all AKD tables");
@@ -134,7 +133,7 @@ impl MsSql {
         trace!("Acquiring database connection from pool");
         self.pool.get().await.map_err(|e| {
             error!(error = %e, "Failed to get DB connection");
-            StorageError::Connection(format!("Failed to get DB connection: {}", e))
+            StorageError::Connection(format!("Failed to get DB connection: {e}"))
         })
     }

@@ -158,7 +157,7 @@ impl MsSql {
             .await
             .map_err(|e| {
                 error!(error = %e, "Failed to execute statement");
-                StorageError::Other(format!("Failed to execute statement: {}", e))
+                StorageError::Other(format!("Failed to execute statement: {e}"))
             })?;
         debug!("Statement executed successfully");
         Ok(())
@@ -326,8 +325,7 @@ impl Database for MsSql {
                 })?;
                 error!(error = %e, "batch_set rolled back");
                 Err(StorageError::Other(format!(
-                    "Failed to batch set records: {}",
-                    e
+                    "Failed to batch set records: {e}"
                 )))
             }
         }
@@ -404,7 +402,7 @@ impl Database for MsSql {
                 StorageError::Other(format!("Failed to create temp table: {e}"))
             })?;
         let mut bulk = conn
-            .bulk_insert(&temp_table_name)
+            .bulk_insert(temp_table_name)
             .await
             .map_err(|e| StorageError::Other(format!("Failed to start bulk insert: {e}")))?;
         for row in DbRecord::get_batch_temp_table_rows::<St>(ids)? {
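The `bulk_insert(&temp_table_name)` → `bulk_insert(temp_table_name)` change here (and in the later hunks touching `bulk_insert` and `simple_query`) is Clippy's `needless_borrow` fix: when the callee takes `&str` and the argument already is one, the extra `&` just creates a `&&str` that is auto-dereferenced anyway. A sketch under that assumption, with a hypothetical stand-in function:

```rust
// `insert_into` is a hypothetical stand-in for a callee that takes &str.
fn insert_into(table: &str) -> String {
    format!("INSERT INTO {table} ...")
}

fn main() {
    let temp_table_name: &str = "#akd_temp_ids"; // illustrative name
    // Before: insert_into(&temp_table_name) -- a needless &&str.
    // After: pass the reference through unchanged.
    let sql = insert_into(temp_table_name);
    assert!(sql.starts_with("INSERT INTO #akd_temp_ids"));
}
```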
@@ -520,8 +518,7 @@ impl Database for MsSql {
             Err(e) => {
                 error!(error = %e, "Failed to get all data for label");
                 Err(StorageError::Other(format!(
-                    "Failed to get all data for label: {}",
-                    e
+                    "Failed to get all data for label: {e}"
                 )))
             }
         }
@@ -555,9 +552,7 @@ impl Database for MsSql {
             statement.parse(&row)
         } else {
             debug!("Raw label not found");
-            Err(StorageError::NotFound(format!(
-                "ValueState for label not found"
-            )))
+            Err(StorageError::NotFound("ValueState for label not found".to_string()))
         }
     }

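This hunk is Clippy's `useless_format` fix: `format!` with no placeholders runs the whole formatting machinery just to produce a fixed string, so `.to_string()` on the literal is preferred. A minimal sketch:

```rust
fn main() {
    // Before: allocates through the formatting machinery for a constant string.
    let old = format!("ValueState for label not found");
    // After: a plain conversion, as clippy --fix rewrites it.
    let new = "ValueState for label not found".to_string();
    assert_eq!(old, new);
}
```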
@@ -591,7 +586,7 @@ impl Database for MsSql {

         // Use bulk_insert to insert all the raw_labels into a temporary table
         let mut bulk = conn
-            .bulk_insert(&temp_table_name)
+            .bulk_insert(temp_table_name)
             .await
             .map_err(|e| StorageError::Other(format!("Failed to start bulk insert: {e}")))?;
         for raw_label in raw_labels {
@@ -605,7 +600,7 @@ impl Database for MsSql {
             .map_err(|e| StorageError::Other(format!("Failed to finalize bulk insert: {e}")))?;

         // read rows matching the raw_labels from the temporary table
-        let statement = values::get_versions_by_flag(&temp_table_name, flag);
+        let statement = values::get_versions_by_flag(temp_table_name, flag);
         let query_stream = conn
             .query(statement.sql(), &statement.params())
             .await
@@ -32,9 +32,9 @@ impl SqlParam {

         match (starts_with_bracket, ends_with_bracket) {
             (true, true) => trimmed.to_string(),
-            (true, false) => format!("{}]", trimmed),
-            (false, true) => format!("[{}", trimmed),
-            (false, false) => format!("[{}]", trimmed),
+            (true, false) => format!("{trimmed}]"),
+            (false, true) => format!("[{trimmed}"),
+            (false, false) => format!("[{trimmed}]"),
         }
     }
 }
@@ -16,8 +16,8 @@ use crate::ms_sql::{
     tables::{temp_table::TempTable, values},
 };

-const SELECT_AZKS_DATA: &'static [&str] = &["epoch", "num_nodes"];
-const SELECT_HISTORY_TREE_NODE_DATA: &'static [&str] = &[
+const SELECT_AZKS_DATA: &[&str] = &["epoch", "num_nodes"];
+const SELECT_HISTORY_TREE_NODE_DATA: &[&str] = &[
     "label_len",
     "label_val",
     "last_epoch",
@@ -41,7 +41,7 @@ const SELECT_HISTORY_TREE_NODE_DATA: &'static [&str] = &[
     "p_right_child_label_val",
     "p_hash",
 ];
-const SELECT_LABEL_DATA: &'static [&str] = &[
+const SELECT_LABEL_DATA: &[&str] = &[
     "raw_label",
     "epoch",
     "version",
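Dropping `&'static` from these consts is Clippy's `redundant_static_lifetimes` fix: references inside `const` and `static` items get the `'static` lifetime by default, so spelling it out is pure noise. A minimal sketch using one of the consts from this diff:

```rust
// Before: const SELECT_AZKS_DATA: &'static [&str] = &["epoch", "num_nodes"];
// After: `'static` is implied on references in const items.
const SELECT_AZKS_DATA: &[&str] = &["epoch", "num_nodes"];

fn main() {
    assert_eq!(SELECT_AZKS_DATA.len(), 2);
}
```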
@@ -326,7 +326,7 @@ impl AkdStorableForMsSql for DbRecord {
                 INSERT ([akd_key], [epoch], [num_nodes])
                 VALUES (source.[akd_key], source.[epoch], source.[num_nodes]);
                 "#,
-                TempTable::Azks.to_string()
+                TempTable::Azks
             ),
             StorageType::TreeNode => format!(
                 r#"
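`TempTable::Azks.to_string()` inside `format!` (here and in the next several hunks) is flagged by Clippy's `to_string_in_format_args` lint: the `{}` placeholder already invokes `Display::fmt`, so the intermediate `String` allocation is wasted. A sketch with a hypothetical stand-in enum; the table name is illustrative:

```rust
use std::fmt;

// Hypothetical stand-in for the TempTable enum in this diff.
enum TempTable {
    Azks,
}

impl fmt::Display for TempTable {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            TempTable::Azks => write!(f, "#akd_temp_azks"), // illustrative name
        }
    }
}

fn main() {
    // Before: format!("MERGE {} ...", TempTable::Azks.to_string()) -- extra String.
    // After: Display is called directly by the placeholder.
    let sql = format!("MERGE {} ...", TempTable::Azks);
    assert_eq!(sql, "MERGE #akd_temp_azks ...");
}
```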
@@ -405,7 +405,7 @@ impl AkdStorableForMsSql for DbRecord {
                 , source.p_hash
                 );
                 "#,
-                TempTable::HistoryTreeNodes.to_string()
+                TempTable::HistoryTreeNodes
             ),
             StorageType::ValueState => format!(
                 r#"
@@ -422,7 +422,7 @@ impl AkdStorableForMsSql for DbRecord {
                 INSERT (raw_label, epoch, [version], node_label_val, node_label_len, [data])
                 VALUES (source.raw_label, source.epoch, source.[version], source.node_label_val, source.node_label_len, source.[data]);
                 "#,
-                TempTable::Values.to_string()
+                TempTable::Values
             ),
         }
     }
@@ -530,7 +530,7 @@ impl AkdStorableForMsSql for DbRecord {
                     .map(|s| format!("h.{s}"))
                     .collect::<Vec<_>>()
                     .join(", "),
-                TempTable::for_ids::<St>().to_string()
+                TempTable::for_ids::<St>()
             ),
             StorageType::ValueState => format!(
                 r#"
@@ -545,7 +545,7 @@ impl AkdStorableForMsSql for DbRecord {
                     .map(|s| format!("v.{s}"))
                     .collect::<Vec<_>>()
                     .join(", "),
-                TempTable::for_ids::<St>().to_string()
+                TempTable::for_ids::<St>()
             ),
         }
     }
@@ -641,10 +641,10 @@ impl AkdStorableForMsSql for DbRecord {
             })?),
             None => None,
         };
-        let massaged_hash: akd::Digest = akd::hash::try_parse_digest(&hash.to_vec())
+        let massaged_hash: akd::Digest = akd::hash::try_parse_digest(hash)
             .map_err(|_| StorageError::Other("hash has incorrect length".to_string()))?;
         let massaged_p_hash: Option<akd::Digest> = match p_hash {
-            Some(v) => Some(akd::hash::try_parse_digest(&v.to_vec()).map_err(|_| {
+            Some(v) => Some(akd::hash::try_parse_digest(v).map_err(|_| {
                 StorageError::Other("p_hash has incorrect length".to_string())
             })?),
             None => None,
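`try_parse_digest(&hash.to_vec())` → `try_parse_digest(hash)` is Clippy's `unnecessary_to_owned` fix: copying a slice into a `Vec` only to borrow it back immediately is a pointless round trip through the allocator. A minimal sketch, assuming (as the fix implies) that the callee accepts `&[u8]`; `parse_digest` is a hypothetical stand-in for `akd::hash::try_parse_digest`:

```rust
// Stand-in for a parser that takes &[u8] and checks the length.
fn parse_digest(bytes: &[u8]) -> Result<[u8; 4], ()> {
    bytes.try_into().map_err(|_| ())
}

fn main() {
    let hash: &[u8] = &[1, 2, 3, 4];
    // Before: parse_digest(&hash.to_vec()) -- allocates a Vec just to re-borrow it.
    // After: pass the slice through unchanged.
    assert_eq!(parse_digest(hash), Ok([1, 2, 3, 4]));
}
```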
@@ -12,11 +12,11 @@ pub(crate) enum TempTable {
 impl std::fmt::Display for TempTable {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
-            TempTable::Ids(_) => write!(f, "{}", TEMP_IDS_TABLE),
-            TempTable::Azks => write!(f, "{}", TEMP_AZKS_TABLE),
-            TempTable::HistoryTreeNodes => write!(f, "{}", TEMP_HISTORY_TREE_NODES_TABLE),
-            TempTable::Values => write!(f, "{}", TEMP_VALUES_TABLE),
-            TempTable::RawLabelSearch => write!(f, "{}", TEMP_SEARCH_LABELS_TABLE),
+            TempTable::Ids(_) => write!(f, "{TEMP_IDS_TABLE}"),
+            TempTable::Azks => write!(f, "{TEMP_AZKS_TABLE}"),
+            TempTable::HistoryTreeNodes => write!(f, "{TEMP_HISTORY_TREE_NODES_TABLE}"),
+            TempTable::Values => write!(f, "{TEMP_VALUES_TABLE}"),
+            TempTable::RawLabelSearch => write!(f, "{TEMP_SEARCH_LABELS_TABLE}"),
         }
     }
 }
@@ -27,7 +27,7 @@ impl TempTable {
     }

     pub fn drop(&self) -> String {
-        format!("DROP TABLE IF EXISTS {}", self.to_string())
+        format!("DROP TABLE IF EXISTS {self}")
     }

     pub fn can_create(&self) -> bool {
@@ -151,14 +151,13 @@ pub fn get_versions_by_flag(
         SELECT t.raw_label, t.version, t.data
         FROM {TABLE_VALUES} t
         INNER JOIN (
-            SELECT tmp.raw_label as raw_label, {} as epoch
+            SELECT tmp.raw_label as raw_label, {epoch_col} as epoch
             FROM {TABLE_VALUES} tmp
             INNER JOIN {temp_table_name} s ON s.raw_label = tmp.raw_label
-            {}
+            {filter}
             GROUP BY tmp.raw_label
         ) epochs on epochs.raw_label = t.raw_label AND epochs.epoch = t.epoch
         "#,
-        epoch_col, filter,
     );

     QueryStatement::new(sql, params, version_from_row)
@@ -110,7 +110,7 @@ where
             Ok(result) => {
                 let value_hex = hex::encode(result.value.as_slice());
                 let value_str = String::from_utf8(result.value.0.clone())
-                    .unwrap_or_else(|_| format!("<binary: {}>", value_hex));
+                    .unwrap_or_else(|_| format!("<binary: {value_hex}>"));
                 let msg = format!(
                     "Lookup verified for '{a}'\n Epoch: {}\n Version: {}\n Value: {}",
                     result.epoch, result.version, value_str
@@ -136,11 +136,11 @@ where
                 let msg = format!("Key history for '{a}': No updates found");
                 let _ = response.send(Ok(msg));
             } else {
-                let mut msg = format!("Key history for '{a}': {} update(s)\n", num_updates);
+                let mut msg = format!("Key history for '{a}': {num_updates} update(s)\n");
                 for (i, update) in proof.update_proofs.iter().enumerate() {
                     let value_hex = hex::encode(update.value.as_slice());
                     let value_str = String::from_utf8(update.value.0.clone())
-                        .unwrap_or_else(|_| format!("<binary: {}>", value_hex));
+                        .unwrap_or_else(|_| format!("<binary: {value_hex}>"));
                     msg.push_str(&format!(
                         " [{}] Epoch: {}, Version: {}, Value: {}\n",
                         i + 1, update.epoch, update.version, value_str
@@ -116,7 +116,7 @@ async fn main() -> Result<()> {
     // Otherwise, log to console
     if let Some(ref log_file) = args.log_file {
         let file = std::fs::File::create(log_file)
-            .with_context(|| format!("Failed to create log file: {}", log_file))?;
+            .with_context(|| format!("Failed to create log file: {log_file}"))?;
         let file_layer = tracing_subscriber::fmt::layer()
             .with_writer(file)
             .with_ansi(false)
@@ -125,7 +125,7 @@ async fn main() -> Result<()> {
         layers.push(file_layer.boxed());

         // Print a one-time message about logging to file
-        eprintln!("Logging to file: {}", log_file);
+        eprintln!("Logging to file: {log_file}");
     } else {
         // Console logging with colors (only when no file specified)
         let console_layer = tracing_subscriber::fmt::layer()
@@ -253,7 +253,7 @@ async fn bench_db_insert(num_users: u64, db: &DatabaseType) -> Result<()> {
     use owo_colors::OwoColorize;

     println!("{}", "======= Benchmark operation requested =======".cyan());
-    println!("Beginning DB INSERT benchmark of {} users", num_users);
+    println!("Beginning DB INSERT benchmark of {num_users} users");

     let mut values: Vec<String> = vec![];
     for i in 0..num_users {
@@ -306,8 +306,7 @@ async fn bench_publish(

     println!("{}", "======= Benchmark operation requested =======".cyan());
     println!(
-        "Beginning PUBLISH benchmark of {} users with {} updates/user",
-        num_users, num_updates_per_user
+        "Beginning PUBLISH benchmark of {num_users} users with {num_updates_per_user} updates/user"
     );

     let users: Vec<String> = (1..=num_users)
@@ -394,8 +393,7 @@ async fn bench_lookup(

     println!("{}", "======= Benchmark operation requested =======".cyan());
     println!(
-        "Beginning LOOKUP benchmark of {} users with {} lookups/user",
-        num_users, num_lookups_per_user
+        "Beginning LOOKUP benchmark of {num_users} users with {num_lookups_per_user} lookups/user"
     );

     let user_data: Vec<(String, String)> = (1..=num_users)
@@ -491,9 +489,9 @@ async fn repl_loop(

         match (db, Command::parse(&mut line)) {
             (_, Command::Unknown(other)) => {
-                println!("Input '{}' is not supported, enter 'help' for the help menu", other)
+                println!("Input '{other}' is not supported, enter 'help' for the help menu")
             }
-            (_,Command::InvalidArgs(message)) => println!("Invalid arguments: {}", message),
+            (_,Command::InvalidArgs(message)) => println!("Invalid arguments: {message}"),
             (_, Command::Exit) => {
                 info!("Exiting...");
                 break;
@@ -511,12 +509,12 @@ async fn repl_loop(
                         println!("Database cleaned successfully");
                     }
                     Err(error) => {
-                        println!("Error running migrations: {}", error);
+                        println!("Error running migrations: {error}");
                     }
                 }
             }
             Err(error) => {
-                println!("Error dropping tables: {}", error);
+                println!("Error dropping tables: {error}");
             }
         }
     }
@@ -534,7 +532,7 @@ async fn repl_loop(

     match timeout(Duration::from_secs(30), rpc_rx).await {
         Ok(Ok(Ok(success))) => {
-            println!("Response: {}", success);
+            println!("Response: {success}");
         }
         Ok(Ok(Err(dir_err))) => {
             error!("Error in directory processing command: {}", dir_err);
@@ -102,13 +102,13 @@ fn load_migration_from_path(full_path: &Path, relative_path: &str) -> (String, S
         );
     }
     let up_content = fs::read_to_string(&up_sql_path)
-        .unwrap_or_else(|e| panic!("Failed to read up.sql in {}: {}", relative_path, e));
+        .unwrap_or_else(|e| panic!("Failed to read up.sql in {relative_path}: {e}"));

     // Read down.sql (optional)
     let down_sql_path = full_path.join("down.sql");
     let down_content = if down_sql_path.exists() {
         let content = fs::read_to_string(&down_sql_path)
-            .unwrap_or_else(|e| panic!("Failed to read down.sql in {}: {}", relative_path, e));
+            .unwrap_or_else(|e| panic!("Failed to read down.sql in {relative_path}: {e}"));
         quote! { Some(#content) }
     } else {
         quote! { None }
@@ -307,7 +307,7 @@ pub fn load_migrations(input: TokenStream) -> TokenStream {
     // Read all directories in the migrations directory
     let mut migration_paths = Vec::new();
     for entry in fs::read_dir(&migrations_path)
-        .unwrap_or_else(|e| panic!("Failed to read migrations directory: {}", e))
+        .unwrap_or_else(|e| panic!("Failed to read migrations directory: {e}"))
     {
         let entry = entry.unwrap();
         let path = entry.path();
@@ -70,7 +70,7 @@ pub(crate) async fn run_migration(migration: &Migration, conn: &mut ManagedConne
     conn.simple_query("BEGIN TRANSACTION").await?;

     let result = async {
-        conn.simple_query(&migration.up).await?;
+        conn.simple_query(migration.up).await?;
         record_migration(conn, migration).await?;
         Ok::<_, MigrationError>(())
     }.await;
@@ -88,7 +88,7 @@
             }
         }
     } else {
-        conn.simple_query(&migration.up).await?;
+        conn.simple_query(migration.up).await?;
         record_migration(conn, migration).await?;
         Ok(())
     }
@@ -97,7 +97,7 @@ impl ManagedConnection {
         table: &'a str,
     ) -> Result<tiberius::BulkLoadRequest<'a, Stream>, tiberius::error::Error> {
         debug!(%table, "Starting bulk insert");
-        self.0.bulk_insert(&table).await
+        self.0.bulk_insert(table).await
     }

     async fn ping(&mut self) -> Result<i32, tiberius::error::Error> {
@@ -141,9 +141,8 @@ impl ManageConnection for ConnectionManager {
     }

     fn has_broken(&self, _conn: &mut Self::Connection) -> bool {
-        self.is_healthy
+        *self.is_healthy
             .read()
             .expect("poisoned is_healthy lock")
-            .clone()
     }
 }
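This last hunk is `clone_on_copy` again, this time through a lock guard: cloning a `bool` out of an `RwLockReadGuard` is replaced by a dereference, which copies the value before the guard drops. A minimal sketch, assuming `is_healthy` is something like `Arc<RwLock<bool>>` as the diff suggests:

```rust
use std::sync::{Arc, RwLock};

// Hypothetical stand-in for the ConnectionManager in this diff.
struct ConnectionManager {
    is_healthy: Arc<RwLock<bool>>,
}

impl ConnectionManager {
    fn has_broken(&self) -> bool {
        // Before: self.is_healthy.read().expect(...).clone() -- clone of a Copy bool.
        // After: deref the guard; `*` copies the bool out before the guard drops.
        *self.is_healthy.read().expect("poisoned is_healthy lock")
    }
}

fn main() {
    let mgr = ConnectionManager { is_healthy: Arc::new(RwLock::new(true)) };
    assert!(mgr.has_broken());
}
```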