commit c1890d9f850723ef2e317f743ef6d3cdd335e434
parent 0e2e730565124bb65036175b6e77f7fa08b59f98
Author: Zack Newman <zack@philomathiclife.com>
Date: Wed, 6 Dec 2023 19:24:04 -0700
small cleanup
Diffstat:
32 files changed, 231 insertions(+), 682 deletions(-)
diff --git a/src/api/core/accounts.rs b/src/api/core/accounts.rs
@@ -99,7 +99,7 @@ fn enforce_password_hint_setting(password_hint: &Option<String>) -> EmptyResult
Ok(())
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/accounts/register", data = "<data>")]
fn register(data: JsonUpcase<RegisterData>, _conn: DbConn) -> JsonResult {
err!("Registration is permanently disabled.")
@@ -155,7 +155,6 @@ async fn put_avatar(data: JsonUpcase<AvatarData>, headers: Headers, conn: DbConn
)
}
}
-
let mut user = headers.user;
user.avatar_color = av_data.AvatarColor;
user.save(&conn).await?;
@@ -254,11 +253,9 @@ async fn post_kdf(
if !user.check_valid_password(&kdf_data.MasterPasswordHash) {
err!("Invalid password")
}
-
if kdf_data.Kdf == i32::from(UserKdfType::Pbkdf2) && kdf_data.KdfIterations < 100_000i32 {
err!("PBKDF2 KDF iterations must be at least 100000.")
}
-
if kdf_data.Kdf == i32::from(UserKdfType::Argon2id) {
if kdf_data.KdfIterations < 1i32 {
err!("Argon2 KDF iterations must be at least 1.")
@@ -340,11 +337,9 @@ async fn post_rotatekey(
let Some(mut saved_folder) = Folder::find_by_uuid(&folder_data.Id, &conn).await else {
err!("Folder doesn't exist")
};
-
if &saved_folder.user_uuid != user_uuid {
err!("The folder is not owned by the user")
}
-
saved_folder.name = folder_data.Name;
saved_folder.save(&conn).await?;
}
@@ -411,7 +406,7 @@ struct EmailTokenData {
NewEmail: String,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/accounts/email-token", data = "<data>")]
fn post_email_token(
data: JsonUpcase<EmailTokenData>,
@@ -431,7 +426,7 @@ struct ChangeEmailData {
Token: NumberOrString,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/accounts/email", data = "<data>")]
fn post_email(
data: JsonUpcase<ChangeEmailData>,
@@ -442,6 +437,7 @@ fn post_email(
err!("Email change is not allowed.");
}
+#[allow(clippy::needless_pass_by_value)]
#[post("/accounts/verify-email")]
fn post_verify_email(_headers: Headers) -> EmptyResult {
err!("Cannot verify email address")
@@ -481,7 +477,7 @@ struct DeleteRecoverData {
Email: String,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/accounts/delete-recover", data = "<data>")]
fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, _conn: DbConn) -> EmptyResult {
err!("Please contact the administrator to delete your account");
@@ -532,7 +528,7 @@ async fn delete_account(
otp_data.validate(&user, true, &conn).await?;
user.delete(&conn).await
}
-#[allow(clippy::unnecessary_wraps)]
+#[allow(clippy::needless_pass_by_value, clippy::unnecessary_wraps)]
#[get("/accounts/revision-date")]
fn revision_date(headers: Headers) -> JsonResult {
let revision_date = headers.user.updated_at.timestamp_millis();
@@ -545,7 +541,7 @@ struct PasswordHintData {
Email: String,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/accounts/password-hint", data = "<data>")]
fn password_hint(data: JsonUpcase<PasswordHintData>, _conn: DbConn) -> EmptyResult {
err!("This server is not configured to provide password hints.")
@@ -579,7 +575,6 @@ pub async fn _prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Val
None,
),
};
-
let result = json!({
"Kdf": kdf_type,
"KdfIterations": kdf_iter,
@@ -706,16 +701,25 @@ impl<'r> FromRequest<'r> for KnownDevice {
#[allow(non_snake_case)]
struct PushToken;
+#[allow(
+ unused_variables,
+ clippy::needless_pass_by_value,
+ clippy::unnecessary_wraps
+)]
#[post("/devices/identifier/<uuid>/token", data = "<data>")]
fn post_device_token(
uuid: &str,
data: JsonUpcase<PushToken>,
- headers: Headers,
- conn: DbConn,
+ _headers: Headers,
+ _conn: DbConn,
) -> EmptyResult {
- put_device_token(uuid, data, headers, conn)
+ Ok(())
}
-#[allow(unused_variables, clippy::unnecessary_wraps)]
+#[allow(
+ unused_variables,
+ clippy::needless_pass_by_value,
+ clippy::unnecessary_wraps
+)]
#[put("/devices/identifier/<uuid>/token", data = "<data>")]
fn put_device_token(
uuid: &str,
@@ -725,7 +729,11 @@ fn put_device_token(
) -> EmptyResult {
Ok(())
}
-#[allow(unused_variables, clippy::unnecessary_wraps)]
+#[allow(
+ unused_variables,
+ clippy::needless_pass_by_value,
+ clippy::unnecessary_wraps
+)]
#[put("/devices/identifier/<uuid>/clear-token")]
fn put_clear_device_token(uuid: &str, _conn: DbConn) -> EmptyResult {
Ok(())
@@ -791,11 +799,9 @@ async fn get_auth_request(uuid: &str, conn: DbConn) -> JsonResult {
let Some(auth_request) = AuthRequest::find_by_uuid(uuid, &conn).await else {
err!("AuthRequest doesn't exist")
};
-
let response_date_utc = auth_request
.response_date
.map(|response_date| response_date.and_utc());
-
Ok(Json(json!(
{
"id": uuid,
@@ -877,15 +883,12 @@ async fn get_auth_request_response(uuid: &str, code: &str, conn: DbConn) -> Json
let Some(auth_request) = AuthRequest::find_by_uuid(uuid, &conn).await else {
err!("AuthRequest doesn't exist")
};
-
if !auth_request.check_access_code(code) {
err!("Access code invalid doesn't exist")
}
-
let response_date_utc = auth_request
.response_date
.map(|response_date| response_date.and_utc());
-
Ok(Json(json!(
{
"id": uuid,
diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs
@@ -93,10 +93,8 @@ struct SyncData {
#[get("/sync?<data..>")]
async fn sync(data: SyncData, headers: Headers, conn: DbConn) -> Json<Value> {
let user_json = headers.user.to_json(&conn).await;
-
// Get all ciphers which are visible by the user
let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn).await;
-
let cipher_sync_data =
CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &conn).await;
@@ -113,7 +111,6 @@ async fn sync(data: SyncData, headers: Headers, conn: DbConn) -> Json<Value> {
.await,
);
}
-
let collections = Collection::find_by_user_uuid(headers.user.uuid.clone(), &conn).await;
let mut collections_json = Vec::with_capacity(collections.len());
for c in collections {
@@ -122,25 +119,21 @@ async fn sync(data: SyncData, headers: Headers, conn: DbConn) -> Json<Value> {
.await,
);
}
-
let folders_json: Vec<Value> = Folder::find_by_user(&headers.user.uuid, &conn)
.await
.iter()
.map(Folder::to_json)
.collect();
-
let policies_json: Vec<Value> = OrgPolicy::find_confirmed_by_user(&headers.user.uuid, &conn)
.await
.iter()
.map(OrgPolicy::to_json)
.collect();
-
let domains_json = if data.exclude_domains {
Value::Null
} else {
api::core::_get_eq_domains(headers, true).into_inner()
};
-
Json(json!({
"Profile": user_json,
"Folders": folders_json,
@@ -159,7 +152,6 @@ async fn get_ciphers(headers: Headers, conn: DbConn) -> Json<Value> {
let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn).await;
let cipher_sync_data =
CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &conn).await;
-
let mut ciphers_json = Vec::with_capacity(ciphers.len());
for c in ciphers {
ciphers_json.push(
@@ -172,7 +164,6 @@ async fn get_ciphers(headers: Headers, conn: DbConn) -> Json<Value> {
.await,
);
}
-
Json(json!({
"Data": ciphers_json,
"Object": "list",
@@ -185,14 +176,12 @@ async fn get_cipher(uuid: &str, headers: Headers, conn: DbConn) -> JsonResult {
let Some(cipher) = Cipher::find_by_uuid(uuid, &conn).await else {
err!("Cipher doesn't exist")
};
-
if !cipher
.is_accessible_to_user(&headers.user.uuid, &conn)
.await
{
err!("Cipher is not owned by user")
}
-
Ok(Json(
cipher
.to_json(&headers.user.uuid, None, CipherSyncType::User, &conn)
@@ -283,22 +272,18 @@ async fn post_ciphers_create(
nt: Notify<'_>,
) -> JsonResult {
let mut data: ShareCipherData = data.into_inner().data;
-
// Check if there are one more more collections selected when this cipher is part of an organization.
// err if this is not the case before creating an empty cipher.
if data.Cipher.OrganizationId.is_some() && data.CollectionIds.is_empty() {
err!("You must select at least one collection.");
}
-
// This check is usually only needed in update_cipher_from_data(), but we
// need it here as well to avoid creating an empty cipher in the call to
// cipher.save() below.
enforce_personal_ownership_policy(Some(&data.Cipher), &headers, &conn).await?;
-
let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone());
cipher.user_uuid = Some(headers.user.uuid.clone());
cipher.save(&conn).await?;
-
// When cloning a cipher, the Bitwarden clients seem to set this field
// based on the cipher being cloned (when creating a new cipher, it's set
// to null as expected). However, `cipher.created_at` is initialized to
@@ -306,7 +291,6 @@ async fn post_ciphers_create(
// line. Since this function only creates new ciphers (whether by cloning
// or otherwise), we can just ignore this field entirely.
data.Cipher.LastKnownRevisionDate = None;
-
share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt).await
}
@@ -319,13 +303,11 @@ async fn post_ciphers(
nt: Notify<'_>,
) -> JsonResult {
let mut data: CipherData = data.into_inner().data;
-
// The web/browser clients set this field to null as expected, but the
// mobile clients seem to set the invalid value `0001-01-01T00:00:00`,
// which results in a warning message being logged. This field isn't
// needed when creating a new cipher, so just ignore it unconditionally.
data.LastKnownRevisionDate = None;
-
let mut cipher = Cipher::new(data.Type, data.Name.clone());
update_cipher_from_data(
&mut cipher,
@@ -337,7 +319,6 @@ async fn post_ciphers(
UpdateType::SyncCipherCreate,
)
.await?;
-
Ok(Json(
cipher
.to_json(&headers.user.uuid, None, CipherSyncType::User, &conn)
@@ -377,7 +358,6 @@ pub async fn update_cipher_from_data(
ut: UpdateType,
) -> EmptyResult {
enforce_personal_ownership_policy(Some(&data), headers, conn).await?;
-
// Check that the client isn't updating an existing cipher with stale data.
// And only perform this check when not importing ciphers, else the date/time check will fail.
if ut != UpdateType::None {
@@ -392,11 +372,9 @@ pub async fn update_cipher_from_data(
}
}
}
-
if cipher.organization_uuid.is_some() && cipher.organization_uuid != data.OrganizationId {
err!("Organization mismatch. Please resync the client before updating the cipher")
}
-
if let Some(ref note) = data.Notes {
if note.len() > 10_000 {
err!("The field Notes exceeds the maximum encrypted value length of 10000 characters.")
@@ -427,7 +405,6 @@ pub async fn update_cipher_from_data(
} else {
cipher.user_uuid = Some(headers.user.uuid.clone());
}
-
if let Some(ref folder_id) = data.FolderId {
match Folder::find_by_uuid(folder_id, conn).await {
Some(folder) => {
@@ -455,7 +432,6 @@ pub async fn update_cipher_from_data(
};
json_data
}
-
let type_data_opt = match data.Type {
1i32 => data.Login,
2i32 => data.SecureNote,
@@ -463,7 +439,6 @@ pub async fn update_cipher_from_data(
4i32 => data.Identity,
_ => err!("Invalid type"),
};
-
let type_data = match type_data_opt {
Some(mut data_in_type) => {
// Remove the 'Response' key from the base object.
@@ -476,7 +451,6 @@ pub async fn update_cipher_from_data(
}
None => err!("Data missing"),
};
-
cipher.key = data.Key;
cipher.name = data.Name;
cipher.notes = data.Notes;
@@ -484,7 +458,6 @@ pub async fn update_cipher_from_data(
cipher.data = type_data.to_string();
cipher.password_history = data.PasswordHistory.map(|f| f.to_string());
cipher.reprompt = data.Reprompt;
-
cipher.save(conn).await?;
cipher
.move_to_folder(data.FolderId, &headers.user.uuid, conn)
@@ -492,7 +465,6 @@ pub async fn update_cipher_from_data(
cipher
.set_favorite(data.Favorite, &headers.user.uuid, conn)
.await?;
-
if ut != UpdateType::None {
nt.send_cipher_update(
ut,
@@ -531,36 +503,28 @@ async fn post_ciphers_import(
nt: Notify<'_>,
) -> EmptyResult {
enforce_personal_ownership_policy(None, &headers, &conn).await?;
-
let data: ImportData = data.into_inner().data;
-
// Validate the import before continuing
// Bitwarden does not process the import if there is one item invalid.
// Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
Cipher::validate_notes(&data.Ciphers)?;
-
// Read and create the folders
let mut folders: Vec<_> = Vec::new();
for folder in data.Folders {
let mut new_folder = Folder::new(headers.user.uuid.clone(), folder.Name);
new_folder.save(&conn).await?;
-
folders.push(new_folder);
}
-
// Read the relations between folders and ciphers
let mut relations_map = HashMap::new();
-
for relation in data.FolderRelationships {
relations_map.insert(relation.Key, relation.Value);
}
-
// Read and create the ciphers
for (index, mut cipher_data) in data.Ciphers.into_iter().enumerate() {
let folder_uuid = relations_map.get(&index).map(|i| folders[*i].uuid.clone());
cipher_data.FolderId = folder_uuid;
-
let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
update_cipher_from_data(
&mut cipher,
@@ -573,11 +537,9 @@ async fn post_ciphers_import(
)
.await?;
}
-
let mut user = headers.user;
user.update_revision(&conn).await?;
nt.send_user_update(UpdateType::SyncVault, &user).await;
-
Ok(())
}
@@ -624,23 +586,19 @@ async fn put_cipher(
nt: Notify<'_>,
) -> JsonResult {
let data: CipherData = data.into_inner().data;
-
let Some(mut cipher) = Cipher::find_by_uuid(uuid, &conn).await else {
err!("Cipher doesn't exist")
};
-
// TODO: Check if only the folder ID or favorite status is being changed.
// These are per-user properties that technically aren't part of the
// cipher itself, so the user shouldn't need write access to change these.
// Interestingly, upstream Bitwarden doesn't properly handle this either.
-
if !cipher
.is_write_accessible_to_user(&headers.user.uuid, &conn)
.await
{
err!("Cipher is not write accessible")
}
-
update_cipher_from_data(
&mut cipher,
data,
@@ -651,7 +609,6 @@ async fn put_cipher(
UpdateType::SyncCipherUpdate,
)
.await?;
-
Ok(Json(
cipher
.to_json(&headers.user.uuid, None, CipherSyncType::User, &conn)
@@ -678,11 +635,9 @@ async fn put_cipher_partial(
conn: DbConn,
) -> JsonResult {
let data: PartialCipherData = data.into_inner().data;
-
let Some(cipher) = Cipher::find_by_uuid(uuid, &conn).await else {
err!("Cipher doesn't exist")
};
-
if let Some(ref folder_id) = data.FolderId {
match Folder::find_by_uuid(folder_id, &conn).await {
Some(folder) => {
@@ -693,7 +648,6 @@ async fn put_cipher_partial(
None => err!("Folder doesn't exist"),
}
}
-
// Move cipher
cipher
.move_to_folder(data.FolderId.clone(), &headers.user.uuid, &conn)
@@ -702,7 +656,6 @@ async fn put_cipher_partial(
cipher
.set_favorite(Some(data.Favorite), &headers.user.uuid, &conn)
.await?;
-
Ok(Json(
cipher
.to_json(&headers.user.uuid, None, CipherSyncType::User, &conn)
@@ -758,11 +711,9 @@ async fn post_collections_admin(
nt: Notify<'_>,
) -> EmptyResult {
let data: CollectionsAdminData = data.into_inner().data;
-
let Some(cipher) = Cipher::find_by_uuid(uuid, &conn).await else {
err!("Cipher doesn't exist")
};
-
if !cipher
.is_write_accessible_to_user(&headers.user.uuid, &conn)
.await
@@ -777,7 +728,6 @@ async fn post_collections_admin(
.iter()
.cloned()
.collect();
-
for collection in posted_collections.symmetric_difference(&current_collections) {
match Collection::find_by_uuid(collection, &conn).await {
None => err!("Invalid collection ID provided"),
@@ -826,7 +776,6 @@ async fn post_cipher_share(
nt: Notify<'_>,
) -> JsonResult {
let data: ShareCipherData = data.into_inner().data;
-
share_cipher_by_uuid(uuid, data, &headers, &conn, &nt).await
}
@@ -868,19 +817,16 @@ async fn put_cipher_share_selected(
err!("Request missing ids field");
}
}
-
while let Some(cipher) = data.Ciphers.pop() {
let mut shared_cipher_data = ShareCipherData {
Cipher: cipher,
CollectionIds: data.CollectionIds.clone(),
};
-
match shared_cipher_data.Cipher.Id.take() {
Some(id) => share_cipher_by_uuid(&id, shared_cipher_data, &headers, &conn, &nt).await?,
None => err!("Request missing ids field"),
};
}
-
Ok(())
}
@@ -904,9 +850,7 @@ async fn share_cipher_by_uuid(
}
None => err!("Cipher doesn't exist"),
};
-
let mut shared_to_collection = false;
-
if let Some(ref organization_uuid) = data.Cipher.OrganizationId {
for col_uuid in &data.CollectionIds {
match Collection::find_by_uuid_and_org(col_uuid, organization_uuid, conn).await {
@@ -925,14 +869,12 @@ async fn share_cipher_by_uuid(
}
}
};
-
// When LastKnownRevisionDate is None, it is a new cipher, so send CipherCreate.
let ut = if data.Cipher.LastKnownRevisionDate.is_some() {
UpdateType::SyncCipherUpdate
} else {
UpdateType::SyncCipherCreate
};
-
update_cipher_from_data(
&mut cipher,
data.Cipher,
@@ -943,7 +885,6 @@ async fn share_cipher_by_uuid(
ut,
)
.await?;
-
Ok(Json(
cipher
.to_json(&headers.user.uuid, None, CipherSyncType::User, conn)
@@ -957,7 +898,7 @@ async fn share_cipher_by_uuid(
/// Upstream added this v2 API to support direct download of attachments from
/// their object storage service. For self-hosted instances, it basically just
/// redirects to the same location as before the v2 API.
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[get("/ciphers/<uuid>/attachment/<attachment_id>")]
fn get_attachment(uuid: &str, attachment_id: &str, _headers: Headers, _conn: DbConn) -> JsonResult {
err!("Attachments are disabled")
@@ -976,7 +917,7 @@ struct AttachmentRequestData {
/// This redirects the client to the API it should use to upload the attachment.
/// For upstream's cloud-hosted service, it's an Azure object storage API.
/// For self-hosted instances, it's another API on the local instance.
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/ciphers/<uuid>/attachment/v2", data = "<data>")]
fn post_attachment_v2(
uuid: &str,
@@ -998,7 +939,7 @@ struct UploadData<'f> {
/// This route needs a rank specified so that Rocket prioritizes the
/// /ciphers/<uuid>/attachment/v2 route, which would otherwise conflict
/// with this one.
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post(
"/ciphers/<uuid>/attachment/<attachment_id>",
format = "multipart/form-data",
@@ -1017,7 +958,7 @@ fn post_attachment_v2_data(
}
/// Legacy API for creating an attachment associated with a cipher.
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post(
"/ciphers/<uuid>/attachment",
format = "multipart/form-data",
@@ -1033,7 +974,7 @@ fn post_attachment(
err!("Attachments are disabled")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post(
"/ciphers/<uuid>/attachment-admin",
format = "multipart/form-data",
@@ -1049,7 +990,7 @@ fn post_attachment_admin(
err!("Attachments are disabled")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post(
"/ciphers/<uuid>/attachment/<attachment_id>/share",
format = "multipart/form-data",
@@ -1066,7 +1007,7 @@ fn post_attachment_share(
err!("Attachments are disabled")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/ciphers/<uuid>/attachment/<attachment_id>/delete-admin")]
fn delete_attachment_post_admin(
uuid: &str,
@@ -1078,7 +1019,7 @@ fn delete_attachment_post_admin(
err!("Attachments are disabled")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/ciphers/<uuid>/attachment/<attachment_id>/delete")]
fn delete_attachment_post(
uuid: &str,
@@ -1090,7 +1031,7 @@ fn delete_attachment_post(
err!("Attachments are disabled")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[delete("/ciphers/<uuid>/attachment/<attachment_id>")]
fn delete_attachment(
uuid: &str,
@@ -1102,7 +1043,7 @@ fn delete_attachment(
err!("Attachments are disabled")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[delete("/ciphers/<uuid>/attachment/<attachment_id>/admin")]
fn delete_attachment_admin(
uuid: &str,
@@ -1157,7 +1098,6 @@ async fn delete_cipher_put_admin(
#[delete("/ciphers/<uuid>")]
async fn delete_cipher(uuid: &str, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
_delete_cipher_by_uuid(uuid, &headers, &conn, false, &nt).await
- // permanent delete
}
#[delete("/ciphers/<uuid>/admin")]
@@ -1276,7 +1216,6 @@ async fn move_cipher_selected(
) -> EmptyResult {
let data = data.into_inner().data;
let user_uuid = headers.user.uuid;
-
if let Some(ref folder_id) = data.FolderId {
match Folder::find_by_uuid(folder_id, &conn).await {
Some(folder) => {
@@ -1287,12 +1226,10 @@ async fn move_cipher_selected(
None => err!("Folder doesn't exist"),
}
}
-
for uuid in data.Ids {
let Some(cipher) = Cipher::find_by_uuid(&uuid, &conn).await else {
err!("Cipher doesn't exist")
};
-
if !cipher.is_accessible_to_user(&user_uuid, &conn).await {
err!("Cipher is not accessible by user")
}
@@ -1309,7 +1246,6 @@ async fn move_cipher_selected(
)
.await;
}
-
Ok(())
}
@@ -1339,9 +1275,7 @@ async fn delete_all(
) -> EmptyResult {
let data: PasswordOrOtpData = data.into_inner().data;
let mut user = headers.user;
-
data.validate(&user, true, &conn).await?;
-
if let Some(org_data) = organization {
// Organization ID in query params, purging organization vault
match UserOrganization::find_by_user_and_org(&user.uuid, &org_data.org_id, &conn).await {
@@ -1362,15 +1296,12 @@ async fn delete_all(
for cipher in Cipher::find_owned_by_user(&user.uuid, &conn).await {
cipher.delete(&conn).await?;
}
-
// Delete folders
for f in Folder::find_by_user(&user.uuid, &conn).await {
f.delete(&conn).await?;
}
-
user.update_revision(&conn).await?;
nt.send_user_update(UpdateType::SyncVault, &user).await;
-
Ok(())
}
}
@@ -1385,14 +1316,12 @@ async fn _delete_cipher_by_uuid(
let Some(mut cipher) = Cipher::find_by_uuid(uuid, conn).await else {
err!("Cipher doesn't exist")
};
-
if !cipher
.is_write_accessible_to_user(&headers.user.uuid, conn)
.await
{
err!("Cipher can't be deleted by user")
}
-
if soft_delete {
cipher.deleted_at = Some(Utc::now().naive_utc());
cipher.save(conn).await?;
@@ -1426,7 +1355,6 @@ async fn _delete_multiple_ciphers(
nt: Notify<'_>,
) -> EmptyResult {
let data: Value = data.into_inner().data;
-
let uuids = match data.get("Ids") {
Some(ids) => match ids.as_array() {
Some(ids) => ids.iter().filter_map(Value::as_str),
@@ -1434,7 +1362,6 @@ async fn _delete_multiple_ciphers(
},
None => err!("Request missing ids field"),
};
-
for uuid in uuids {
if let error @ Err(_) =
_delete_cipher_by_uuid(uuid, &headers, &conn, soft_delete, &nt).await
@@ -1442,7 +1369,6 @@ async fn _delete_multiple_ciphers(
return error;
};
}
-
Ok(())
}
@@ -1455,14 +1381,12 @@ async fn _restore_cipher_by_uuid(
let Some(mut cipher) = Cipher::find_by_uuid(uuid, conn).await else {
err!("Cipher doesn't exist")
};
-
if !cipher
.is_write_accessible_to_user(&headers.user.uuid, conn)
.await
{
err!("Cipher can't be restored by user")
}
-
cipher.deleted_at = None;
cipher.save(conn).await?;
nt.send_cipher_update(
@@ -1487,7 +1411,6 @@ async fn _restore_multiple_ciphers(
nt: &Notify<'_>,
) -> JsonResult {
let data: Value = data.into_inner().data;
-
let uuids = match data.get("Ids") {
Some(ids) => match ids.as_array() {
Some(ids) => ids.iter().filter_map(Value::as_str),
@@ -1495,7 +1418,6 @@ async fn _restore_multiple_ciphers(
},
None => err!("Request missing ids field"),
};
-
let mut ciphers: Vec<Value> = Vec::new();
for uuid in uuids {
match _restore_cipher_by_uuid(uuid, headers, conn, nt).await {
@@ -1503,7 +1425,6 @@ async fn _restore_multiple_ciphers(
err => return err,
}
}
-
Ok(Json(json!({
"Data": ciphers,
"Object": "list",
@@ -1540,7 +1461,6 @@ impl CipherSyncData {
.await
.into_iter()
.collect();
-
// Generate a HashSet of all the Cipher UUID's which are marked as favorite
cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_uuid, conn)
.await
@@ -1565,7 +1485,6 @@ impl CipherSyncData {
.or_default()
.push(collection);
}
-
// Generate a HashMap with the Organization UUID as key and the UserOrganization record
let user_organizations: HashMap<String, UserOrganization> =
UserOrganization::find_by_user(user_uuid, conn)
@@ -1573,7 +1492,6 @@ impl CipherSyncData {
.into_iter()
.map(|uo| (uo.org_uuid.clone(), uo))
.collect();
-
// Generate a HashMap with the User_Collections UUID as key and the CollectionUser record
let user_collections: HashMap<String, CollectionUser> =
CollectionUser::find_by_user(user_uuid, conn)
diff --git a/src/api/core/emergency_access.rs b/src/api/core/emergency_access.rs
@@ -27,16 +27,18 @@ pub fn routes() -> Vec<Route> {
policies_emergency_access,
]
}
+#[allow(clippy::needless_pass_by_value)]
#[get("/emergency-access/trusted")]
fn get_contacts(_headers: Headers, _conn: DbConn) -> JsonResult {
err!("Emergency access is not allowed.")
}
+#[allow(clippy::needless_pass_by_value)]
#[get("/emergency-access/granted")]
fn get_grantees(_headers: Headers, _conn: DbConn) -> JsonResult {
err!("Emergency access is not allowed.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[get("/emergency-access/<emer_id>")]
fn get_emergency_access(emer_id: &str, _conn: DbConn) -> JsonResult {
err!("Emergency access is not allowed.")
@@ -49,7 +51,7 @@ struct EmergencyAccessUpdateData {
WaitTimeDays: i32,
KeyEncrypted: Option<String>,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[put("/emergency-access/<emer_id>", data = "<data>")]
fn put_emergency_access(
emer_id: &str,
@@ -59,7 +61,7 @@ fn put_emergency_access(
err!("Emergency access is not allowed.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/emergency-access/<emer_id>", data = "<data>")]
fn post_emergency_access(
emer_id: &str,
@@ -69,13 +71,13 @@ fn post_emergency_access(
err!("Emergency access is not allowed.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[delete("/emergency-access/<emer_id>")]
fn delete_emergency_access(emer_id: &str, _headers: Headers, _conn: DbConn) -> EmptyResult {
err!("Emergency access is not allowed.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/emergency-access/<emer_id>/delete")]
fn post_delete_emergency_access(emer_id: &str, _headers: Headers, _conn: DbConn) -> EmptyResult {
err!("Emergency access is not allowed.")
@@ -89,7 +91,7 @@ struct EmergencyAccessInviteData {
WaitTimeDays: i32,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/emergency-access/invite", data = "<data>")]
fn send_invite(
data: JsonUpcase<EmergencyAccessInviteData>,
@@ -99,7 +101,7 @@ fn send_invite(
err!("Emergency access is not allowed.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/emergency-access/<emer_id>/reinvite")]
fn resend_invite(emer_id: &str, _headers: Headers, _conn: DbConn) -> EmptyResult {
err!("Emergency access is not allowed.")
@@ -111,7 +113,7 @@ struct AcceptData {
Token: String,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/emergency-access/<emer_id>/accept", data = "<data>")]
fn accept_invite(
emer_id: &str,
@@ -128,7 +130,7 @@ struct ConfirmData {
Key: String,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/emergency-access/<emer_id>/confirm", data = "<data>")]
fn confirm_emergency_access(
emer_id: &str,
@@ -139,31 +141,31 @@ fn confirm_emergency_access(
err!("Emergency access is not allowed.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/emergency-access/<emer_id>/initiate")]
fn initiate_emergency_access(emer_id: &str, _headers: Headers, _conn: DbConn) -> JsonResult {
err!("Emergency access is not allowed.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/emergency-access/<emer_id>/approve")]
fn approve_emergency_access(emer_id: &str, _headers: Headers, _conn: DbConn) -> JsonResult {
err!("Emergency access is not allowed.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/emergency-access/<emer_id>/reject")]
fn reject_emergency_access(emer_id: &str, _headers: Headers, _conn: DbConn) -> JsonResult {
err!("Emergency access is not allowed.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/emergency-access/<emer_id>/view")]
fn view_emergency_access(emer_id: &str, _headers: Headers, _conn: DbConn) -> JsonResult {
err!("Emergency access is not allowed.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/emergency-access/<emer_id>/takeover")]
fn takeover_emergency_access(emer_id: &str, _headers: Headers, _conn: DbConn) -> JsonResult {
err!("Emergency access is not allowed.")
@@ -176,7 +178,7 @@ struct EmergencyAccessPasswordData {
Key: String,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/emergency-access/<emer_id>/password", data = "<data>")]
fn password_emergency_access(
emer_id: &str,
@@ -187,7 +189,7 @@ fn password_emergency_access(
err!("Emergency access is not allowed.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[get("/emergency-access/<emer_id>/policies")]
fn policies_emergency_access(emer_id: &str, _headers: Headers, _conn: DbConn) -> JsonResult {
err!("Emergency access not valid.")
diff --git a/src/api/core/events.rs b/src/api/core/events.rs
@@ -5,9 +5,6 @@ use crate::{
};
use rocket::{form::FromForm, serde::json::Json, Route};
use serde_json::Value;
-
-/// ###############################################################################################################
-/// /api routes
pub fn routes() -> Vec<Route> {
routes![get_org_events, get_cipher_events, get_user_events,]
}
@@ -22,7 +19,11 @@ struct EventRange {
}
// Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41
-#[allow(unused_variables, clippy::unnecessary_wraps)]
+#[allow(
+ unused_variables,
+ clippy::needless_pass_by_value,
+ clippy::unnecessary_wraps
+)]
#[get("/organizations/<org_id>/events?<data..>")]
fn get_org_events(
org_id: &str,
@@ -37,7 +38,11 @@ fn get_org_events(
})))
}
-#[allow(unused_variables, clippy::unnecessary_wraps)]
+#[allow(
+ unused_variables,
+ clippy::needless_pass_by_value,
+ clippy::unnecessary_wraps
+)]
#[get("/ciphers/<cipher_id>/events?<data..>")]
fn get_cipher_events(
cipher_id: &str,
@@ -52,7 +57,11 @@ fn get_cipher_events(
})))
}
-#[allow(unused_variables, clippy::unnecessary_wraps)]
+#[allow(
+ unused_variables,
+ clippy::needless_pass_by_value,
+ clippy::unnecessary_wraps
+)]
#[get("/organizations/<org_id>/users/<user_org_id>/events?<data..>")]
fn get_user_events(
org_id: &str,
@@ -67,8 +76,6 @@ fn get_user_events(
"ContinuationToken": None::<&str>,
})))
}
-/// ###############################################################################################################
-/// /events routes
pub fn main_routes() -> Vec<Route> {
routes![post_events_collect,]
}
@@ -76,11 +83,14 @@ pub fn main_routes() -> Vec<Route> {
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
struct EventCollection;
-
// Upstream:
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
-#[allow(unused_variables, clippy::unnecessary_wraps)]
+#[allow(
+ unused_variables,
+ clippy::needless_pass_by_value,
+ clippy::unnecessary_wraps
+)]
#[post("/collect", format = "application/json", data = "<data>")]
fn post_events_collect(
data: JsonUpcaseVec<EventCollection>,
diff --git a/src/api/core/folders.rs b/src/api/core/folders.rs
@@ -22,7 +22,6 @@ pub fn routes() -> Vec<rocket::Route> {
async fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
let folders = Folder::find_by_user(&headers.user.uuid, &conn).await;
let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
-
Json(json!({
"Data": folders_json,
"Object": "list",
@@ -35,11 +34,9 @@ async fn get_folder(uuid: &str, headers: Headers, conn: DbConn) -> JsonResult {
let Some(folder) = Folder::find_by_uuid(uuid, &conn).await else {
err!("Invalid folder")
};
-
if folder.user_uuid != headers.user.uuid {
err!("Folder belongs to another user")
}
-
Ok(Json(folder.to_json()))
}
@@ -57,13 +54,10 @@ async fn post_folders(
nt: Notify<'_>,
) -> JsonResult {
let data: FolderData = data.into_inner().data;
-
let mut folder = Folder::new(headers.user.uuid, data.Name);
-
folder.save(&conn).await?;
nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device.uuid)
.await;
-
Ok(Json(folder.to_json()))
}
@@ -87,21 +81,16 @@ async fn put_folder(
nt: Notify<'_>,
) -> JsonResult {
let data: FolderData = data.into_inner().data;
-
let Some(mut folder) = Folder::find_by_uuid(uuid, &conn).await else {
err!("Invalid folder")
};
-
if folder.user_uuid != headers.user.uuid {
err!("Folder belongs to another user")
}
-
folder.name = data.Name;
-
folder.save(&conn).await?;
nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device.uuid)
.await;
-
Ok(Json(folder.to_json()))
}
@@ -120,7 +109,6 @@ async fn delete_folder(uuid: &str, headers: Headers, conn: DbConn, nt: Notify<'_
let Some(folder) = Folder::find_by_uuid(uuid, &conn).await else {
err!("Invalid folder")
};
-
if folder.user_uuid != headers.user.uuid {
err!("Folder belongs to another user")
}
diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs
@@ -12,7 +12,6 @@ pub fn routes() -> Vec<Route> {
let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
let mut hibp_routes = routes![hibp_breach];
let mut meta_routes = routes![alive, now, version, config];
-
let mut routes = Vec::new();
routes.append(&mut accounts::routes());
routes.append(&mut ciphers::routes());
@@ -26,28 +25,25 @@ pub fn routes() -> Vec<Route> {
routes.append(&mut eq_domains_routes);
routes.append(&mut hibp_routes);
routes.append(&mut meta_routes);
-
routes
}
pub fn events_routes() -> Vec<Route> {
let mut routes = Vec::new();
routes.append(&mut events::main_routes());
-
routes
}
//
// Move this somewhere else
//
-use rocket::{serde::json::Json, Catcher, Route};
-use serde_json::Value;
-
use crate::{
api::{JsonResult, JsonUpcase, Notify},
auth::Headers,
db::DbConn,
};
+use rocket::{serde::json::Json, Catcher, Route};
+use serde_json::Value;
#[derive(Serialize, Deserialize, Debug)]
#[allow(non_snake_case)]
@@ -67,20 +63,15 @@ fn get_eq_domains(headers: Headers) -> Json<Value> {
fn _get_eq_domains(headers: Headers, no_excluded: bool) -> Json<Value> {
let user = headers.user;
use serde_json::from_str;
-
let equivalent_domains: Vec<Vec<String>> = from_str(&user.equivalent_domains).unwrap();
let excluded_globals: Vec<i32> = from_str(&user.excluded_globals).unwrap();
-
let mut globals: Vec<GlobalDomain> = from_str(GLOBAL_DOMAINS).unwrap();
-
for global in &mut globals {
global.Excluded = excluded_globals.contains(&global.Type);
}
-
if no_excluded {
globals.retain(|g| !g.Excluded);
}
-
Json(json!({
"EquivalentDomains": equivalent_domains,
"GlobalEquivalentDomains": globals,
@@ -103,16 +94,12 @@ async fn post_eq_domains(
_nt: Notify<'_>,
) -> JsonResult {
let data: EquivDomainData = data.into_inner().data;
-
let excluded_globals = data.ExcludedGlobalEquivalentDomains.unwrap_or_default();
let equivalent_domains = data.EquivalentDomains.unwrap_or_default();
-
let mut user = headers.user;
use serde_json::to_string;
-
user.excluded_globals = to_string(&excluded_globals).unwrap_or_else(|_| "[]".to_owned());
user.equivalent_domains = to_string(&equivalent_domains).unwrap_or_else(|_| "[]".to_owned());
-
user.save(&conn).await?;
Ok(Json(json!({})))
}
@@ -133,6 +120,7 @@ fn hibp_breach(username: &str) -> JsonResult {
}
// We use DbConn here to let the alive healthcheck also verify the database connection.
+#[allow(clippy::needless_pass_by_value)]
#[get("/alive")]
fn alive(_conn: DbConn) -> Json<String> {
now()
diff --git a/src/api/core/organizations.rs b/src/api/core/organizations.rs
@@ -153,7 +153,7 @@ struct OrgBulkIds {
Ids: Vec<String>,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/organizations", data = "<data>")]
fn create_organization(_headers: Headers, data: JsonUpcase<OrgData>, _conn: DbConn) -> JsonResult {
err!("User not allowed to create organizations")
@@ -295,11 +295,9 @@ async fn get_org_collections_details(
SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()
})
.collect();
-
if user_org.access_all {
assigned = true;
}
-
let mut json_object = col.to_json();
json_object["Assigned"] = json!(assigned);
json_object["Users"] = json!(users);
@@ -307,7 +305,6 @@ async fn get_org_collections_details(
json_object["Object"] = json!("collectionAccessDetails");
data.push(json_object);
}
-
Ok(Json(json!({
"Data": data,
"Object": "list",
@@ -331,7 +328,6 @@ async fn post_organization_collections(
conn: DbConn,
) -> JsonResult {
let data: NewCollectionData = data.into_inner().data;
-
let Some(org) = Organization::find_by_uuid(org_id, &conn).await else {
err!("Can't find organization details")
};
@@ -341,11 +337,9 @@ async fn post_organization_collections(
let Some(org_user) = UserOrganization::find_by_uuid(&user.Id, &conn).await else {
err!("User is not part of organization")
};
-
if org_user.access_all {
continue;
}
-
CollectionUser::save(
&org_user.user_uuid,
&collection.uuid,
@@ -355,7 +349,6 @@ async fn post_organization_collections(
)
.await?;
}
-
if headers.org_user.atype == UserOrgType::Manager && !headers.org_user.access_all {
CollectionUser::save(
&headers.org_user.user_uuid,
@@ -366,7 +359,6 @@ async fn post_organization_collections(
)
.await?;
}
-
Ok(Json(collection.to_json()))
}
@@ -393,15 +385,12 @@ async fn post_organization_collection_update(
let Some(org) = Organization::find_by_uuid(org_id, &conn).await else {
err!("Can't find organization details")
};
-
let Some(mut collection) = Collection::find_by_uuid(col_id, &conn).await else {
err!("Collection not found")
};
-
if collection.org_uuid != org.uuid {
err!("Collection is not owned by organization");
}
-
collection.name = data.Name;
collection.external_id = match data.ExternalId {
Some(external_id) if !external_id.trim().is_empty() => Some(external_id),
@@ -413,11 +402,9 @@ async fn post_organization_collection_update(
let Some(org_user) = UserOrganization::find_by_uuid(&user.Id, &conn).await else {
err!("User is not part of organization")
};
-
if org_user.access_all {
continue;
}
-
CollectionUser::save(
&org_user.user_uuid,
col_id,
@@ -427,7 +414,6 @@ async fn post_organization_collection_update(
)
.await?;
}
-
Ok(Json(collection.to_json()))
}
@@ -449,7 +435,6 @@ async fn delete_organization_collection_user(
}
}
};
-
match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &conn).await {
None => err!("User not found in organization"),
Some(user_org) => {
@@ -545,11 +530,8 @@ async fn bulk_delete_organization_collections(
if org_id != data.OrganizationId {
err!("OrganizationId mismatch");
}
-
let collections = data.Ids;
-
let headers = ManagerHeaders::from_loose(headers, &collections, &conn).await?;
-
for col_id in collections {
_delete_organization_collection(org_id, &col_id, &headers, &conn).await?;
}
@@ -569,13 +551,11 @@ async fn get_org_collection_detail(
if collection.org_uuid != org_id {
err!("Collection is not owned by organization")
}
-
let Some(user_org) =
UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &conn).await
else {
err!("User is not part of organization")
};
-
let groups: Vec<Value> = Vec::new();
let mut assigned = false;
let users: Vec<Value> =
@@ -599,13 +579,11 @@ async fn get_org_collection_detail(
if user_org.access_all {
assigned = true;
}
-
let mut json_object = collection.to_json();
json_object["Assigned"] = json!(assigned);
json_object["Users"] = json!(users);
json_object["Groups"] = json!(groups);
json_object["Object"] = json!("collectionAccessDetails");
-
Ok(Json(json_object))
}
}
@@ -623,7 +601,6 @@ async fn get_collection_users(
None => err!("Collection not found in Organization"),
Some(collection) => collection,
};
-
let mut user_list = Vec::new();
for col_user in CollectionUser::find_by_collection(&collection.uuid, &conn).await {
user_list.push(
@@ -633,7 +610,6 @@ async fn get_collection_users(
.to_json_user_access_restrictions(&col_user),
);
}
-
Ok(Json(json!(user_list)))
}
@@ -652,23 +628,18 @@ async fn put_collection_users(
{
err!("Collection not found in Organization")
}
-
// Delete all the user-collections
CollectionUser::delete_all_by_collection(coll_id, &conn).await?;
-
// And then add all the received ones (except if the user has access_all)
for d in data.iter().map(|d| &d.data) {
let Some(user) = UserOrganization::find_by_uuid(&d.Id, &conn).await else {
err!("User is not part of organization")
};
-
if user.access_all {
continue;
}
-
CollectionUser::save(&user.user_uuid, coll_id, d.ReadOnly, d.HidePasswords, &conn).await?;
}
-
Ok(())
}
@@ -727,7 +698,6 @@ async fn get_org_users(
.await,
);
}
-
Json(json!({
"Data": users_json,
"Object": "list",
@@ -743,7 +713,6 @@ async fn post_org_keys(
conn: DbConn,
) -> JsonResult {
let data: OrgKeyData = data.into_inner().data;
-
let mut org = match Organization::find_by_uuid(org_id, &conn).await {
Some(organization) => {
if organization.private_key.is_some() && organization.public_key.is_some() {
@@ -753,12 +722,9 @@ async fn post_org_keys(
}
None => err!("Can't find organization details"),
};
-
org.private_key = Some(data.EncryptedPrivateKey);
org.public_key = Some(data.PublicKey);
-
org.save(&conn).await?;
-
Ok(Json(json!({
"Object": "organizationKeys",
"PublicKey": org.public_key,
@@ -784,7 +750,7 @@ struct InviteData {
AccessAll: Option<bool>,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/organizations/<org_id>/users/invite", data = "<data>")]
fn send_invite(
org_id: &str,
@@ -794,7 +760,7 @@ fn send_invite(
) -> EmptyResult {
err!("No more organizations are allowed.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/organizations/<org_id>/users/reinvite", data = "<data>")]
fn bulk_reinvite_user(
org_id: &str,
@@ -809,7 +775,7 @@ fn bulk_reinvite_user(
}))
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/organizations/<org_id>/users/<user_org>/reinvite")]
fn reinvite_user(
org_id: &str,
@@ -827,7 +793,7 @@ struct AcceptData {
ResetPasswordKey: Option<String>,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/organizations/<org_id>/users/<_org_user_id>/accept", data = "<data>")]
fn accept_invite(
org_id: &str,
@@ -838,7 +804,7 @@ fn accept_invite(
err!("No more organizations are allowed.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/organizations/<org_id>/users/confirm", data = "<data>")]
fn bulk_confirm_invite(
org_id: &str,
@@ -854,7 +820,7 @@ fn bulk_confirm_invite(
}))
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/organizations/<org_id>/users/<org_user_id>/confirm", data = "<data>")]
fn confirm_invite(
org_id: &str,
@@ -879,7 +845,6 @@ async fn get_user(
else {
err!("The specified user isn't a member of the organization")
};
-
// In this case, when groups are requested we also need to include collections.
// Else these will not be shown in the interface, and could lead to missing collections when saved.
let include_groups = data.include_groups.unwrap_or(false);
@@ -926,28 +891,23 @@ async fn edit_user(
conn: DbConn,
) -> EmptyResult {
let data: EditUserData = data.into_inner().data;
-
let Some(new_type) = UserOrgType::from_str(&data.Type.into_string()) else {
err!("Invalid type")
};
-
let Some(mut user_to_edit) =
UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &conn).await
else {
err!("The specified user isn't member of the organization")
};
-
if new_type != user_to_edit.atype
&& (user_to_edit.atype >= UserOrgType::Admin || new_type >= UserOrgType::Admin)
&& headers.org_user_type != UserOrgType::Owner
{
err!("Only Owners can grant and remove Admin or Owner privileges")
}
-
if user_to_edit.atype == UserOrgType::Owner && headers.org_user_type != UserOrgType::Owner {
err!("Only Owners can edit Owner users")
}
-
if user_to_edit.atype == UserOrgType::Owner
&& new_type != UserOrgType::Owner
&& user_to_edit.status == i32::from(UserOrgStatus::Confirmed)
@@ -960,7 +920,6 @@ async fn edit_user(
err!("Can't delete the last owner")
}
}
-
// This check is also done at accept_invite(), _confirm_invite, _activate_user(), edit_user(), admin::update_user_org_type
// It returns different error messages per function.
if new_type < UserOrgType::Admin {
@@ -983,7 +942,6 @@ async fn edit_user(
{
c.delete(&conn).await?;
}
-
// If no accessAll, add the collections received
if !data.AccessAll {
for col in data.Collections.iter().flatten() {
@@ -1014,14 +972,12 @@ async fn bulk_delete_user(
nt: Notify<'_>,
) -> Json<Value> {
let data: OrgBulkIds = data.into_inner().data;
-
let mut bulk_response = Vec::new();
for org_user_id in data.Ids {
let err_msg = match _delete_user(org_id, &org_user_id, &headers, &conn, &nt).await {
Ok(()) => String::new(),
Err(e) => format!("{e:?}"),
};
-
bulk_response.push(json!(
{
"Object": "OrganizationBulkConfirmResponseModel",
@@ -1030,7 +986,6 @@ async fn bulk_delete_user(
}
));
}
-
Json(json!({
"Data": bulk_response,
"Object": "list",
@@ -1072,11 +1027,9 @@ async fn _delete_user(
else {
err!("User to delete isn't member of the organization")
};
-
if user_to_delete.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
err!("Only Owners can delete Admins or Owners")
}
-
if user_to_delete.atype == UserOrgType::Owner
&& user_to_delete.status == i32::from(UserOrgStatus::Confirmed)
{
@@ -1090,7 +1043,6 @@ async fn _delete_user(
if let Some(user) = User::find_by_uuid(&user_to_delete.user_uuid, conn).await {
nt.send_user_update(UpdateType::SyncOrgKeys, &user).await;
}
-
user_to_delete.delete(conn).await
}
@@ -1102,7 +1054,6 @@ async fn bulk_public_keys(
conn: DbConn,
) -> Json<Value> {
let data: OrgBulkIds = data.into_inner().data;
-
let mut bulk_response = Vec::new();
// Check all received UserOrg UUID's and find the matching User to retrieve the public-key.
// If the user does not exists, just ignore it, and do not return any information regarding that UserOrg UUID.
@@ -1127,7 +1078,6 @@ async fn bulk_public_keys(
debug!("UserOrg doesn't exist");
}
}
-
Json(json!({
"Data": bulk_response,
"Object": "list",
@@ -1165,13 +1115,11 @@ async fn post_org_import(
) -> EmptyResult {
let data: ImportData = data.into_inner().data;
let org_id = query.organization_id;
-
// Validate the import before continuing
// Bitwarden does not process the import if there is one item invalid.
// Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
Cipher::validate_notes(&data.Ciphers)?;
-
let mut collections = Vec::new();
for coll in data.Collections {
let collection = Collection::new(org_id.clone(), coll.Name, coll.ExternalId);
@@ -1184,15 +1132,12 @@ async fn post_org_import(
collections.push(Ok(collection));
}
}
-
// Read the relations between collections and ciphers
let mut relations = Vec::new();
for relation in data.CollectionRelationships {
relations.push((relation.Key, relation.Value));
}
-
let headers: Headers = headers.into();
-
let mut ciphers = Vec::new();
for cipher_data in data.Ciphers {
let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
@@ -1209,7 +1154,6 @@ async fn post_org_import(
.ok();
ciphers.push(cipher);
}
-
// Assign the collections
for (cipher_index, coll_index) in relations {
let cipher_id = &ciphers[cipher_index].uuid;
@@ -1221,7 +1165,6 @@ async fn post_org_import(
CollectionCipher::save(cipher_id, coll_id, &conn).await?;
}
-
let mut user = headers.user;
user.update_revision(&conn).await
}
@@ -1230,7 +1173,6 @@ async fn post_org_import(
async fn list_policies(org_id: &str, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
let policies = OrgPolicy::find_by_org(org_id, &conn).await;
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
-
Json(json!({
"Data": policies_json,
"Object": "list",
@@ -1244,15 +1186,12 @@ async fn list_policies_token(org_id: &str, token: &str, conn: DbConn) -> JsonRes
let Some(invite_org_id) = invite.org_id else {
err!("Invalid token")
};
-
if invite_org_id != org_id {
err!("Token doesn't match request organization");
}
-
// TODO: We receive the invite token as ?token=<>, validate it contains the org id
let policies = OrgPolicy::find_by_org(org_id, &conn).await;
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
-
Ok(Json(json!({
"Data": policies_json,
"Object": "list",
@@ -1294,18 +1233,15 @@ async fn put_policy(
conn: DbConn,
) -> JsonResult {
let data: PolicyData = data.into_inner();
-
let Some(pol_type_enum) = OrgPolicyType::from_i32(pol_type) else {
err!("Invalid or unsupported policy type")
};
-
// When enabling the TwoFactorAuthentication policy, remove this org's members that do have 2FA
if pol_type_enum == OrgPolicyType::TwoFactorAuthentication && data.enabled {
for member in UserOrganization::find_by_org(org_id, &conn).await {
let user_twofactor_disabled = TwoFactor::find_by_user(&member.user_uuid, &conn)
.await
.is_empty();
-
// Policy only applies to non-Owner/non-Admin members who have accepted joining the org
// Invited users still need to accept the invite and will get an error when they try to accept the invite.
if user_twofactor_disabled
@@ -1316,7 +1252,6 @@ async fn put_policy(
}
}
}
-
// When enabling the SingleOrg policy, remove this org's members that are members of other orgs
if pol_type_enum == OrgPolicyType::SingleOrg && data.enabled {
for member in UserOrganization::find_by_org(org_id, &conn).await {
@@ -1345,7 +1280,7 @@ async fn put_policy(
Ok(Json(policy.to_json()))
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[get("/organizations/<org_id>/tax")]
fn get_organization_tax(org_id: &str, _headers: Headers) -> Json<Value> {
// Prevent a 404 error, which also causes Javascript errors.
@@ -1388,6 +1323,7 @@ fn get_plans_all() -> Json<Value> {
get_plans()
}
+#[allow(clippy::needless_pass_by_value)]
#[get("/plans/sales-tax-rates")]
fn get_plans_tax_rates(_headers: Headers) -> Json<Value> {
// Prevent a 404 error, which also causes Javascript errors.
@@ -1436,19 +1372,16 @@ async fn import(
conn: DbConn,
) -> EmptyResult {
let data = data.into_inner().data;
-
// TODO: Currently we aren't storing the externalId's anywhere, so we also don't have a way
// to differentiate between auto-imported users and manually added ones.
// This means that this endpoint can end up removing users that were added manually by an admin,
// as opposed to upstream which only removes auto-imported users.
-
// User needs to be admin or owner to use the Directory Connector
match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &conn).await {
Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ }
Some(_) => err!("User has insufficient permissions to use Directory Connector"),
None => err!("User not part of organization"),
};
-
for user_data in &data.Users {
if user_data.Deleted {
// If user is marked for deletion and it exists, delete it
@@ -1534,7 +1467,6 @@ async fn bulk_revoke_organization_user(
conn: DbConn,
) -> Json<Value> {
let data = data.into_inner().data;
-
let mut bulk_response = Vec::new();
match data["Ids"].as_array() {
Some(org_users) => {
@@ -1545,7 +1477,6 @@ async fn bulk_revoke_organization_user(
Ok(()) => String::new(),
Err(e) => format!("{e:?}"),
};
-
bulk_response.push(json!(
{
"Object": "OrganizationUserBulkResponseModel",
@@ -1557,7 +1488,6 @@ async fn bulk_revoke_organization_user(
}
None => panic!("No users to revoke"),
}
-
Json(json!({
"Data": bulk_response,
"Object": "list",
@@ -1639,7 +1569,6 @@ async fn bulk_restore_organization_user(
conn: DbConn,
) -> Json<Value> {
let data = data.into_inner().data;
-
let mut bulk_response = Vec::new();
match data["Ids"].as_array() {
Some(org_users) => {
@@ -1650,7 +1579,6 @@ async fn bulk_restore_organization_user(
Ok(()) => String::new(),
Err(e) => format!("{e:?}"),
};
-
bulk_response.push(json!(
{
"Object": "OrganizationUserBulkResponseModel",
@@ -1662,7 +1590,6 @@ async fn bulk_restore_organization_user(
}
None => panic!("No users to restore"),
}
-
Json(json!({
"Data": bulk_response,
"Object": "list",
@@ -1684,7 +1611,6 @@ async fn _restore_organization_user(
if user_org.atype == UserOrgType::Owner && headers.org_user_type != UserOrgType::Owner {
err!("Only owners can restore other owners")
}
-
// This check is also done at accept_invite(), _confirm_invite, _activate_user(), edit_user(), admin::update_user_org_type
// It returns different error messages per function.
if user_org.atype < UserOrgType::Admin {
@@ -1698,7 +1624,6 @@ async fn _restore_organization_user(
}
}
}
-
user_org.restore();
user_org.save(conn).await?;
}
@@ -1708,7 +1633,11 @@ async fn _restore_organization_user(
Ok(())
}
-#[allow(unused_variables, clippy::unnecessary_wraps)]
+#[allow(
+ unused_variables,
+ clippy::needless_pass_by_value,
+ clippy::unnecessary_wraps
+)]
#[get("/organizations/<org_id>/groups")]
fn get_groups(org_id: &str, _headers: ManagerHeadersLoose, _conn: DbConn) -> JsonResult {
Ok(Json(json!({
@@ -1737,24 +1666,24 @@ impl SelectionReadOnly {
HidePasswords: collection_user.hide_passwords,
}
}
-
fn to_json(&self) -> Value {
json!(self)
}
}
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/organizations/<org_id>/groups/<group_id>", data = "<data>")]
fn post_group(
org_id: &str,
group_id: &str,
data: JsonUpcase<GroupRequest>,
- headers: AdminHeaders,
- conn: DbConn,
+ _headers: AdminHeaders,
+ _conn: DbConn,
) -> JsonResult {
- put_group(org_id, group_id, data, headers, conn)
+ err!("Group support is disabled")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/organizations/<org_id>/groups", data = "<data>")]
fn post_groups(
org_id: &str,
@@ -1765,7 +1694,7 @@ fn post_groups(
err!("Group support is disabled")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[put("/organizations/<org_id>/groups/<group_id>", data = "<data>")]
fn put_group(
org_id: &str,
@@ -1777,7 +1706,7 @@ fn put_group(
err!("Group support is disabled")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[get("/organizations/<_org_id>/groups/<group_id>/details")]
fn get_group_details(
_org_id: &str,
@@ -1788,25 +1717,28 @@ fn get_group_details(
err!("Group support is disabled");
}
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/organizations/<org_id>/groups/<group_id>/delete")]
fn post_delete_group(
org_id: &str,
group_id: &str,
- headers: AdminHeaders,
- conn: DbConn,
+ _headers: AdminHeaders,
+ _conn: DbConn,
) -> EmptyResult {
- _delete_group(org_id, group_id, &headers, &conn)
+ err!("Group support is disabled");
}
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[delete("/organizations/<org_id>/groups/<group_id>")]
-fn delete_group(org_id: &str, group_id: &str, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
- _delete_group(org_id, group_id, &headers, &conn)
-}
-fn _delete_group(_: &str, _: &str, _: &AdminHeaders, _: &DbConn) -> EmptyResult {
+fn delete_group(
+ org_id: &str,
+ group_id: &str,
+ _headers: AdminHeaders,
+ _conn: DbConn,
+) -> EmptyResult {
err!("Group support is disabled");
}
-
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[delete("/organizations/<org_id>/groups", data = "<data>")]
fn bulk_delete_groups(
org_id: &str,
@@ -1817,13 +1749,13 @@ fn bulk_delete_groups(
err!("Group support is disabled");
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[get("/organizations/<_org_id>/groups/<group_id>")]
fn get_group(_org_id: &str, group_id: &str, _headers: AdminHeaders, _conn: DbConn) -> JsonResult {
err!("Group support is disabled");
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[get("/organizations/<_org_id>/groups/<group_id>/users")]
fn get_group_users(
_org_id: &str,
@@ -1833,7 +1765,7 @@ fn get_group_users(
) -> JsonResult {
err!("Group support is disabled");
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[put("/organizations/<org_id>/groups/<group_id>/users", data = "<data>")]
fn put_group_users(
org_id: &str,
@@ -1845,7 +1777,7 @@ fn put_group_users(
err!("Group support is disabled");
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[get("/organizations/<_org_id>/users/<user_id>/groups")]
fn get_user_groups(
_org_id: &str,
@@ -1860,18 +1792,19 @@ fn get_user_groups(
#[allow(non_snake_case)]
struct OrganizationUserUpdateGroupsRequest;
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/organizations/<org_id>/users/<org_user_id>/groups", data = "<data>")]
fn post_user_groups(
org_id: &str,
org_user_id: &str,
data: JsonUpcase<OrganizationUserUpdateGroupsRequest>,
- headers: AdminHeaders,
- conn: DbConn,
+ _headers: AdminHeaders,
+ _conn: DbConn,
) -> EmptyResult {
- put_user_groups(org_id, org_user_id, data, headers, conn)
+ err!("Group support is disabled")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[put("/organizations/<org_id>/users/<org_user_id>/groups", data = "<data>")]
fn put_user_groups(
org_id: &str,
@@ -1883,18 +1816,19 @@ fn put_user_groups(
err!("Group support is disabled")
}
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/organizations/<org_id>/groups/<group_id>/delete-user/<org_user_id>")]
fn post_delete_group_user(
org_id: &str,
group_id: &str,
org_user_id: &str,
- headers: AdminHeaders,
- conn: DbConn,
+ _headers: AdminHeaders,
+ _conn: DbConn,
) -> EmptyResult {
- delete_group_user(org_id, group_id, org_user_id, headers, conn)
+ err!("Group support is disabled")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[delete("/organizations/<org_id>/groups/<group_id>/users/<org_user_id>")]
fn delete_group_user(
org_id: &str,
@@ -1925,7 +1859,6 @@ async fn get_organization_keys(org_id: &str, conn: DbConn) -> JsonResult {
let Some(org) = Organization::find_by_uuid(org_id, &conn).await else {
err!("Organization not found")
};
-
Ok(Json(json!({
"Object": "organizationKeys",
"PublicKey": org.public_key,
@@ -1949,7 +1882,6 @@ async fn put_reset_password(
let Some(org) = Organization::find_by_uuid(org_id, &conn).await else {
err!("Required organization not found")
};
-
let Some(org_user) =
UserOrganization::find_by_uuid_and_org(org_user_id, &org.uuid, &conn).await
else {
@@ -1961,7 +1893,7 @@ async fn put_reset_password(
}
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[get("/organizations/<org_id>/users/<org_user_id>/reset-password-details")]
fn get_reset_password_details(
org_id: &str,
@@ -1972,7 +1904,7 @@ fn get_reset_password_details(
err!("Password reset is not supported on an email-disabled instance.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[put(
"/organizations/<org_id>/users/<org_user_id>/reset-password-enrollment",
data = "<data>"
@@ -1997,7 +1929,6 @@ fn put_reset_password_enrollment(
#[get("/organizations/<org_id>/export")]
async fn get_org_export(org_id: &str, headers: AdminHeaders, conn: DbConn) -> Json<Value> {
use semver::{Version, VersionReq};
-
// Since version v2023.1.0 the format of the export is different.
// Also, this endpoint was created since v2022.9.0.
// Therefore, we will check for any version smaller then v2023.1.0 and return a different response.
@@ -2008,7 +1939,6 @@ async fn get_org_export(org_id: &str, headers: AdminHeaders, conn: DbConn) -> Js
let client_version = Version::parse(&client_version).unwrap();
ver_match.matches(&client_version)
});
-
// Also both main keys here need to be lowercase, else the export will fail.
if use_list_response_model {
// Backwards compatible pre v2023.1.0 response
@@ -2042,10 +1972,8 @@ async fn _api_key(
) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
-
// Validate the admin users password/otp
data.validate(&user, true, &conn).await?;
-
let org_api_key =
if let Some(mut org_api_key) = OrganizationApiKey::find_by_org_uuid(org_id, &conn).await {
if rotate {
@@ -2066,7 +1994,6 @@ async fn _api_key(
.expect("Error creating organization API Key");
new_org_api_key
};
-
Ok(Json(json!({
"ApiKey": org_api_key.api_key,
"RevisionDate": crate::util::format_date(&org_api_key.revision_date),
diff --git a/src/api/core/public.rs b/src/api/core/public.rs
@@ -30,10 +30,9 @@ struct OrgImportUserData {
struct OrgImportData {
Members: Vec<OrgImportUserData>,
OverwriteExisting: bool,
- // LargeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more then 2000 users or groups without the flag set.
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/public/organization/import", data = "<data>")]
fn ldap_import(data: JsonUpcase<OrgImportData>, _token: PublicToken, _conn: DbConn) -> EmptyResult {
err!("LDAP import is permanently disabled.")
@@ -44,7 +43,6 @@ struct PublicToken(String);
#[rocket::async_trait]
impl<'r> FromRequest<'r> for PublicToken {
type Error = &'static str;
-
async fn from_request(request: &'r Request<'_>) -> request::Outcome<Self, Self::Error> {
let headers = request.headers();
// Get access_token
@@ -87,7 +85,7 @@ impl<'r> FromRequest<'r> for PublicToken {
None => err_handler!("Invalid client_id"),
}
}
- _ => err_handler!("Error getting DB"),
+ Outcome::Error(_) | Outcome::Forward(_) => err_handler!("Error getting DB"),
};
if org_api_key.org_uuid != claims.client_sub {
err_handler!("Token not issued for this org");
@@ -95,7 +93,6 @@ impl<'r> FromRequest<'r> for PublicToken {
if org_api_key.uuid != claims.sub {
err_handler!("Token not issued for this client");
}
-
Outcome::Success(Self(claims.client_sub))
}
}
diff --git a/src/api/core/sends.rs b/src/api/core/sends.rs
@@ -45,7 +45,7 @@ struct SendData {
FileLength: Option<NumberOrString>,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[get("/sends")]
fn get_sends(_headers: Headers, _conn: DbConn) -> Json<Value> {
Json(json!({
@@ -55,13 +55,13 @@ fn get_sends(_headers: Headers, _conn: DbConn) -> Json<Value> {
}))
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[get("/sends/<uuid>")]
fn get_send(uuid: &str, _headers: Headers, _conn: DbConn) -> JsonResult {
err!("Sends are permanently disabled.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/sends", data = "<data>")]
fn post_send(
data: JsonUpcase<SendData>,
@@ -85,7 +85,7 @@ struct UploadDataV2<'f> {
data: TempFile<'f>,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/sends/file", format = "multipart/form-data", data = "<data>")]
fn post_send_file(
data: Form<UploadData<'_>>,
@@ -96,13 +96,13 @@ fn post_send_file(
err!("Sends are permanently disabled.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/sends/file/v2", data = "<data>")]
fn post_send_file_v2(data: JsonUpcase<SendData>, _headers: Headers, _conn: DbConn) -> JsonResult {
err!("Sends are permanently disabled.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post(
"/sends/<send_uuid>/file/<file_id>",
format = "multipart/form-data",
@@ -125,7 +125,7 @@ struct SendAccessData {
Password: Option<String>,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/sends/access/<access_id>", data = "<data>")]
fn post_access(
access_id: &str,
@@ -137,7 +137,7 @@ fn post_access(
err!("Sends are permanently disabled.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/sends/<send_id>/access/file/<file_id>", data = "<data>")]
fn post_access_file(
send_id: &str,
@@ -150,13 +150,13 @@ fn post_access_file(
err!("Sends are permanently disabled.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[get("/sends/<send_id>/<file_id>?<t>")]
fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Option<NamedFile> {
None
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[put("/sends/<id>", data = "<data>")]
fn put_send(
id: &str,
@@ -168,13 +168,13 @@ fn put_send(
err!("Sends are permanently disabled.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[delete("/sends/<id>")]
fn delete_send(id: &str, _headers: Headers, _conn: DbConn, _nt: Notify<'_>) -> EmptyResult {
err!("Sends are permanently disabled.")
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[put("/sends/<id>/remove-password")]
fn put_remove_password(id: &str, _headers: Headers, _conn: DbConn, _nt: Notify<'_>) -> JsonResult {
err!("Sends are permanently disabled.")
diff --git a/src/api/core/two_factor/authenticator.rs b/src/api/core/two_factor/authenticator.rs
@@ -30,10 +30,9 @@ async fn generate_authenticator(
data.validate(&user, false, &conn).await?;
let type_ = i32::from(TwoFactorType::Authenticator);
let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await;
-
let (enabled, key) = match twofactor {
Some(tf) => (true, tf.data),
- _ => (false, crypto::encode_random_bytes::<20>(BASE32)),
+ _ => (false, crypto::encode_random_bytes::<20>(&BASE32)),
};
Ok(Json(json!({
"Enabled": enabled,
@@ -61,24 +60,20 @@ async fn activate_authenticator(
let key = data.Key;
let token = data.Token.into_string();
let user = headers.user;
-
PasswordOrOtpData {
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
}
.validate(&user, true, &conn)
.await?;
-
// Validate key as base32 and 20 bytes length
let decoded_key: Vec<u8> = match BASE32.decode(key.as_bytes()) {
Ok(decoded) => decoded,
_ => err!("Invalid totp secret"),
};
-
if decoded_key.len() != 20 {
err!("Invalid key length")
}
-
// Validate the token provided with the key, and save new twofactor
validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &headers.ip, &conn).await?;
Ok(Json(json!({
@@ -107,7 +102,6 @@ pub async fn validate_totp_code_str(
if !totp_code.chars().all(char::is_numeric) {
err!("TOTP code is not a number");
}
-
validate_totp_code(user_uuid, totp_code, secret, ip, conn).await
}
#[allow(clippy::integer_division, clippy::redundant_else)]
@@ -119,7 +113,6 @@ async fn validate_totp_code(
conn: &DbConn,
) -> EmptyResult {
use totp_lite::{totp_custom, Sha1};
-
let Ok(decoded_secret) = BASE32.decode(secret.as_bytes()) else {
err!("Invalid TOTP secret")
};
@@ -143,11 +136,11 @@ async fn validate_totp_code(
let current_time = chrono::Utc::now();
let current_timestamp = current_time.timestamp();
let time_step = current_timestamp / 30i64;
- // We need to calculate the time offsite and cast it as an u64.
- // Since we only have times into the future and the totp generator needs an u64 instead of the default i64.
+ // We need to calculate the time offset and cast it as a u64.
+ // Since we only have times in the future and the totp generator needs a u64 instead of the default i64.
let time = u64::try_from(current_timestamp).expect("underflow when casting to a u64 in TOTP");
let generated = totp_custom::<Sha1>(30, 6, &decoded_secret, time);
- // Check the the given code equals the generated and if the time_step is larger then the one last used.
+ // Check that the given code equals the generated one and that the time_step is larger than the one last used.
if generated == totp_code && time_step > i64::from(twofactor.last_used) {
// Save the last used time step so only totp time steps higher then this one are allowed.
// This will also save a newly created twofactor if the code is correct.
diff --git a/src/api/core/two_factor/mod.rs b/src/api/core/two_factor/mod.rs
@@ -22,7 +22,6 @@ pub fn routes() -> Vec<Route> {
disable_twofactor_put,
get_device_verification_settings,
];
-
routes.append(&mut authenticator::routes());
routes.append(&mut protected_actions::routes());
routes.append(&mut webauthn::routes());
@@ -33,7 +32,6 @@ pub fn routes() -> Vec<Route> {
async fn get_twofactor(headers: Headers, conn: DbConn) -> Json<Value> {
let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn).await;
let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();
-
Json(json!({
"Data": twofactors_json,
"Object": "list",
@@ -49,9 +47,7 @@ async fn get_recover(
) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
-
data.validate(&user, true, &conn).await?;
-
Ok(Json(json!({
"Code": user.totp_recover,
"Object": "twoFactorRecover"
@@ -78,17 +74,14 @@ async fn recover(
let Some(mut user) = User::find_by_mail(&data.Email, &conn).await else {
err!("Username or password is incorrect. Try again.")
};
-
// Check password
if !user.check_valid_password(&data.MasterPasswordHash) {
err!("Username or password is incorrect. Try again.")
}
-
// Check if recovery code is correct
if !user.check_valid_recovery_code(&data.RecoveryCode) {
err!("Recovery code is incorrect. Try again.")
}
-
// Remove all twofactors from the user
TwoFactor::delete_all_by_user(&user.uuid, &conn).await?;
// Remove the recovery code, not needed without twofactors
@@ -113,7 +106,6 @@ async fn disable_twofactor(
) -> JsonResult {
let data: DisableTwoFactorData = data.into_inner().data;
let user = headers.user;
-
// Delete directly after a valid token has been provided
PasswordOrOtpData {
MasterPasswordHash: data.MasterPasswordHash,
@@ -121,15 +113,11 @@ async fn disable_twofactor(
}
.validate(&user, true, &conn)
.await?;
-
let type_ = data.Type.into_i32()?;
-
if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await {
twofactor.delete(&conn).await?;
}
-
let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &conn).await.is_empty();
-
if twofactor_disabled {
for user_org in UserOrganization::find_by_user_and_policy(
&user.uuid,
@@ -143,7 +131,6 @@ async fn disable_twofactor(
}
}
}
-
Ok(Json(json!({
"Enabled": false,
"Type": type_,
@@ -169,6 +156,7 @@ async fn disable_twofactor_put(
// https://github.com/bitwarden/server/pull/2016
//
// The HTML part is hidden via the CSS patches done via the bw_web_build repo
+#[allow(clippy::needless_pass_by_value)]
#[get("/two-factor/get-device-verification-settings")]
fn get_device_verification_settings(_headers: Headers, _conn: DbConn) -> Json<Value> {
Json(json!({
diff --git a/src/api/core/two_factor/protected_actions.rs b/src/api/core/two_factor/protected_actions.rs
@@ -42,6 +42,7 @@ impl ProtectedActionData {
}
}
+#[allow(clippy::needless_pass_by_value)]
#[post("/accounts/request-otp")]
fn request_otp(_headers: Headers, _conn: DbConn) -> EmptyResult {
err!("Email is disabled for this server. Either enable email or login using your master password instead of login via device.")
@@ -53,7 +54,7 @@ struct ProtectedActionVerify {
OTP: String,
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/accounts/verify-otp", data = "<data>")]
fn verify_otp(
data: JsonUpcase<ProtectedActionVerify>,
@@ -79,7 +80,6 @@ pub async fn validate_protected_action_otp(
"Protected action token not found, try sending the code again or restart the process",
)?;
let mut pa_data = ProtectedActionData::from_json(&pa.data)?;
-
pa_data.add_attempt();
// Delete the token after x attempts if it has been used too many times
// We use the 6, which should be more then enough for invalid attempts and multiple valid checks
diff --git a/src/api/core/two_factor/webauthn.rs b/src/api/core/two_factor/webauthn.rs
@@ -32,28 +32,6 @@ pub fn routes() -> Vec<Route> {
]
}
-// Some old u2f structs still needed for migrating from u2f to WebAuthn
-// Both `struct Registration` and `struct U2FRegistration` can be removed if we remove the u2f to WebAuthn migration
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-struct Registration {
- key_handle: Vec<u8>,
- pub_key: Vec<u8>,
- attestation_cert: Option<Vec<u8>>,
- device_name: Option<String>,
-}
-
-#[derive(Serialize, Deserialize)]
-struct U2FRegistration {
- id: i32,
- name: String,
- #[serde(with = "Registration")]
- reg: Registration,
- counter: u32,
- compromised: bool,
- migrated: Option<bool>,
-}
-
struct WebauthnConfig {
url: String,
origin: Url,
@@ -76,15 +54,12 @@ impl webauthn_rs::WebauthnConfig for WebauthnConfig {
fn get_relying_party_name(&self) -> &str {
&self.url
}
-
fn get_origin(&self) -> &Url {
&self.origin
}
-
fn get_relying_party_id(&self) -> &str {
&self.rpid
}
-
/// We have WebAuthn configured to discourage user verification
/// if we leave this enabled, it will cause verification issues when a keys send UV=1.
/// Upstream (the library they use) ignores this when set to discouraged, so we should too.
@@ -119,9 +94,7 @@ async fn get_webauthn(
) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
-
data.validate(&user, false, &conn).await?;
-
let (enabled, registrations) = get_webauthn_registrations(&user.uuid, &conn).await?;
let registrations_json: Vec<Value> = registrations
.iter()
@@ -149,7 +122,6 @@ async fn generate_webauthn_challenge(
.into_iter()
.map(|r| r.credential.cred_id) // We return the credentialIds to the clients to avoid double registering
.collect();
-
let (challenge, state) = WebauthnConfig::load().generate_challenge_register_options(
user.uuid.as_bytes().to_vec(),
user.email,
@@ -158,7 +130,6 @@ async fn generate_webauthn_challenge(
None,
None,
)?;
-
let type_ = TwoFactorType::WebauthnRegisterChallenge;
TwoFactor::new(user.uuid, type_, serde_json::to_string(&state)?)
.save(&conn)
@@ -272,7 +243,6 @@ async fn activate_webauthn(
}
.validate(&user, true, &conn)
.await?;
-
// Retrieve and delete the saved challenge state
let type_ = i32::from(TwoFactorType::WebauthnRegisterChallenge);
let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await {
@@ -283,22 +253,18 @@ async fn activate_webauthn(
}
None => err!("Can't recover challenge"),
};
-
// Verify the credentials with the saved state
let (credential, _data) =
WebauthnConfig::load()
.register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?;
-
let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &conn).await?.1;
// TODO: Check for repeated ID's
registrations.push(WebauthnRegistration {
id: data.Id.into_i32()?,
name: data.Name,
migrated: false,
-
credential,
});
-
// Save the registrations and return them
TwoFactor::new(
user.uuid.clone(),
@@ -388,18 +354,15 @@ pub async fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResu
.into_iter()
.map(|r| r.credential)
.collect();
-
if creds.is_empty() {
err!("No Webauthn devices registered")
}
-
// Generate a challenge based on the credentials
let ext = RequestAuthenticationExtensions::builder()
.appid(format!("{}/app-id.json", config::get_config().domain))
.build();
let (response, state) =
WebauthnConfig::load().generate_challenge_authenticate_options(creds, Some(ext))?;
-
// Save the challenge state for later validation
TwoFactor::new(
user_uuid.into(),
@@ -408,7 +371,6 @@ pub async fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResu
)
.save(conn)
.await?;
-
// Return challenge to the clients
Ok(Json(serde_json::to_value(response.public_key)?))
}
@@ -430,13 +392,10 @@ pub async fn validate_webauthn_login(
let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
let rsp: PublicKeyCredential = rsp.data.into();
let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1;
- // If the credential we received is migrated from U2F, enable the U2F compatibility
- //let use_u2f = registrations.iter().any(|r| r.migrated && r.credential.cred_id == rsp.raw_id.0);
let (cred_id, auth_data) = WebauthnConfig::load().authenticate_credential(&rsp, &state)?;
for reg in &mut registrations {
if ®.credential.cred_id == cred_id {
reg.credential.counter = auth_data.counter;
-
TwoFactor::new(
user_uuid.to_owned(),
TwoFactorType::Webauthn,
@@ -447,6 +406,5 @@ pub async fn validate_webauthn_login(
return Ok(());
}
}
-
err!("Credential not present")
}
diff --git a/src/api/identity.rs b/src/api/identity.rs
@@ -1,11 +1,3 @@
-use num_traits::FromPrimitive;
-use rocket::serde::json::Json;
-use rocket::{
- form::{Form, FromForm},
- Route,
-};
-use serde_json::Value;
-
use crate::{
api::{
core::accounts::{PreloginData, RegisterData, _prelogin},
@@ -23,6 +15,13 @@ use crate::{
error::MapResult,
util,
};
+use num_traits::FromPrimitive;
+use rocket::serde::json::Json;
+use rocket::{
+ form::{Form, FromForm},
+ Route,
+};
+use serde_json::Value;
pub fn routes() -> Vec<Route> {
routes![login, prelogin, identity_register]
@@ -42,22 +41,18 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, conn: DbCo
_check_is_some(&data.password, "password cannot be blank")?;
_check_is_some(&data.scope, "scope cannot be blank")?;
_check_is_some(&data.username, "username cannot be blank")?;
-
_check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
_check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?;
-
_password_login(data, &mut user_uuid, &conn, &client_header.ip).await
}
"client_credentials" => {
_check_is_some(&data.client_id, "client_id cannot be blank")?;
_check_is_some(&data.client_secret, "client_secret cannot be blank")?;
_check_is_some(&data.scope, "scope cannot be blank")?;
-
_check_is_some(&data.device_identifier, "device_identifier cannot be blank")?;
_check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?;
-
_api_key_login(data, &mut user_uuid, &conn, &client_header.ip).await
}
t => err!("Invalid type", t),
@@ -68,21 +63,17 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, conn: DbCo
async fn _refresh_login(data: ConnectData, conn: &DbConn) -> JsonResult {
// Extract token
let token = data.refresh_token.unwrap();
-
// Get device by refresh token
let mut device = Device::find_by_refresh_token(&token, conn)
.await
.map_res("Invalid refresh token")?;
-
let scope = "api offline_access";
let scope_vec = vec!["api".into(), "offline_access".into()];
-
// Common
let user = User::find_by_uuid(&device.user_uuid, conn).await.unwrap();
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
- let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
+ let (access_token, expires_in) = device.refresh_tokens(&user, &orgs, scope_vec);
device.save(conn).await?;
-
let result = json!({
"access_token": access_token,
"expires_in": expires_in,
@@ -90,7 +81,6 @@ async fn _refresh_login(data: ConnectData, conn: &DbConn) -> JsonResult {
"refresh_token": device.refresh_token,
"Key": user.akey,
"PrivateKey": user.private_key,
-
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"KdfMemory": user.client_kdf_memory,
@@ -99,7 +89,6 @@ async fn _refresh_login(data: ConnectData, conn: &DbConn) -> JsonResult {
"scope": scope,
"unofficialServer": true,
});
-
Ok(Json(result))
}
#[allow(clippy::else_if_without_else)]
@@ -123,7 +112,6 @@ async fn _password_login(
format!("IP: {}. Username: {}.", ip.ip, username)
)
};
-
// Set the user_uuid here to be passed back used for event logging.
*user_uuid = Some(user.uuid.clone());
// Check password
@@ -154,12 +142,10 @@ async fn _password_login(
if user.password_iterations != config::get_config().password_iterations {
user.password_iterations = config::get_config().password_iterations;
user.set_password(password, None, false, None);
-
if let Err(e) = user.save(conn).await {
panic!("Error updating user: {e:#?}");
}
}
-
// Check if the user is disabled
if !user.enabled {
err!(
@@ -170,9 +156,8 @@ async fn _password_login(
let (mut device, _) = get_device(&data, conn, &user).await;
let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, conn).await?;
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
- let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
+ let (access_token, expires_in) = device.refresh_tokens(&user, &orgs, scope_vec);
device.save(conn).await?;
-
let mut result = json!({
"access_token": access_token,
"expires_in": expires_in,
@@ -180,8 +165,6 @@ async fn _password_login(
"refresh_token": device.refresh_token,
"Key": user.akey,
"PrivateKey": user.private_key,
- //"TwoFactorToken": "11122233333444555666777888999"
-
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"KdfMemory": user.client_kdf_memory,
@@ -194,11 +177,9 @@ async fn _password_login(
"Object": "userDecryptionOptions"
},
});
-
if let Some(token) = twofactor_token {
result["TwoFactorToken"] = Value::String(token);
}
-
info!("User {} logged in successfully. IP: {}", username, ip.ip);
Ok(Json(result))
}
@@ -231,10 +212,8 @@ async fn _user_api_key_login(
let Some(user) = User::find_by_uuid(client_user_uuid, conn).await else {
err!("Invalid client_id", format!("IP: {}.", ip.ip))
};
-
// Set the user_uuid here to be passed back used for event logging.
*user_uuid = Some(user.uuid.clone());
-
// Check if the user is disabled
if !user.enabled {
err!(
@@ -242,7 +221,6 @@ async fn _user_api_key_login(
format!("IP: {}. Username: {}.", ip.ip, user.email)
)
}
-
// Check API key. Note that API key logins bypass 2FA.
let client_secret = data.client_secret.as_ref().unwrap();
if !user.check_valid_api_key(client_secret) {
@@ -251,18 +229,15 @@ async fn _user_api_key_login(
format!("IP: {}. Username: {}.", ip.ip, user.email)
)
}
-
let (mut device, _) = get_device(&data, conn, &user).await;
let scope_vec = vec!["api".into()];
let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
- let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec);
+ let (access_token, expires_in) = device.refresh_tokens(&user, &orgs, scope_vec);
device.save(conn).await?;
-
info!(
"User {} logged in successfully via API key. IP: {}",
user.email, ip.ip
);
-
// Note: No refresh_token is returned. The CLI just repeats the
// client_credentials login flow when the existing token expires.
let result = json!({
@@ -271,7 +246,6 @@ async fn _user_api_key_login(
"token_type": "Bearer",
"Key": user.akey,
"PrivateKey": user.private_key,
-
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter,
"KdfMemory": user.client_kdf_memory,
@@ -280,7 +254,6 @@ async fn _user_api_key_login(
"scope": "api",
"unofficialServer": true,
});
-
Ok(Json(result))
}
@@ -297,7 +270,6 @@ async fn _organization_api_key_login(
let Some(org_api_key) = OrganizationApiKey::find_by_org_uuid(org_uuid, conn).await else {
err!("Invalid client_id", format!("IP: {}.", ip.ip))
};
-
// Check API key.
let client_secret = data.client_secret.as_ref().unwrap();
if !org_api_key.check_valid_api_key(client_secret) {
@@ -306,10 +278,8 @@ async fn _organization_api_key_login(
format!("IP: {}. Organization: {}.", ip.ip, org_api_key.org_uuid)
)
}
-
let claim = generate_organization_api_key_login_claims(org_api_key.uuid, org_api_key.org_uuid);
let access_token = crate::auth::encode_jwt(&claim);
-
Ok(Json(json!({
"access_token": access_token,
"expires_in": 3600i32,
@@ -396,14 +366,12 @@ async fn _json_err_twofactor(
conn: &DbConn,
) -> ApiResult<Value> {
use crate::api::core::two_factor;
-
let mut result = json!({
"error" : "invalid_grant",
"error_description" : "Two factor required.",
"TwoFactorProviders" : providers,
"TwoFactorProviders2" : {} // { "0" : null }
});
-
for provider in providers {
result["TwoFactorProviders2"][provider.to_string()] = Value::Null;
@@ -415,7 +383,6 @@ async fn _json_err_twofactor(
result["TwoFactorProviders2"][provider.to_string()] = request.0;
}
}
-
Ok(result)
}
@@ -424,7 +391,7 @@ async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
_prelogin(data, conn).await
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::needless_pass_by_value)]
#[post("/accounts/register", data = "<data>")]
fn identity_register(data: JsonUpcase<RegisterData>, _conn: DbConn) -> JsonResult {
err!("No more registerations allowed.")
@@ -438,12 +405,10 @@ struct ConnectData {
#[field(name = uncased("grant_type"))]
#[field(name = uncased("granttype"))]
grant_type: String, // refresh_token, password, client_credentials (API key)
-
// Needed for grant_type="refresh_token"
#[field(name = uncased("refresh_token"))]
#[field(name = uncased("refreshtoken"))]
refresh_token: Option<String>,
-
// Needed for grant_type = "password" | "client_credentials"
#[field(name = uncased("client_id"))]
#[field(name = uncased("clientid"))]
@@ -457,7 +422,6 @@ struct ConnectData {
scope: Option<String>,
#[field(name = uncased("username"))]
username: Option<String>,
-
#[field(name = uncased("device_identifier"))]
#[field(name = uncased("deviceidentifier"))]
device_identifier: Option<String>,
@@ -470,7 +434,6 @@ struct ConnectData {
#[field(name = uncased("device_push_token"))]
#[field(name = uncased("devicepushtoken"))]
_device_push_token: Option<String>, // Unused; mobile device push not yet supported.
-
// Needed for two-factor auth
#[field(name = uncased("two_factor_provider"))]
#[field(name = uncased("twofactorprovider"))]
diff --git a/src/api/mod.rs b/src/api/mod.rs
@@ -24,12 +24,10 @@ use crate::db::{models::User, DbConn};
use crate::util;
use rocket::serde::json::Json;
use serde_json::Value;
-
// Type aliases for API methods results
type ApiResult<T> = Result<T, crate::error::Error>;
type JsonResult = ApiResult<Json<Value>>;
pub type EmptyResult = ApiResult<()>;
-
type JsonUpcase<T> = Json<util::UpCase<T>>;
type JsonUpcaseVec<T> = Json<Vec<util::UpCase<T>>>;
type JsonVec<T> = Json<Vec<T>>;
@@ -48,7 +46,6 @@ impl PasswordOrOtpData {
/// This is different per caller, so it can be adjusted to delete the token or not
async fn validate(&self, user: &User, delete_if_valid: bool, conn: &DbConn) -> EmptyResult {
use crate::api::core::two_factor::protected_actions::validate_protected_action_otp;
-
match (self.MasterPasswordHash.as_deref(), self.Otp.as_deref()) {
(Some(pw_hash), None) => {
if !user.check_valid_password(pw_hash) {
@@ -78,13 +75,11 @@ impl NumberOrString {
Self::String(s) => s,
}
}
-
- #[allow(clippy::wrong_self_convention)]
- fn into_i32(&self) -> ApiResult<i32> {
+ fn into_i32(self) -> ApiResult<i32> {
use std::num::ParseIntError as PIE;
- match *self {
+ match self {
Self::Number(n) => Ok(n),
- Self::String(ref s) => s
+ Self::String(s) => s
.parse()
.map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string())),
}
diff --git a/src/api/notifications.rs b/src/api/notifications.rs
@@ -118,10 +118,8 @@ fn websockets_hub<'r>(
let Ok(claims) = crate::auth::decode_login(&token) else {
err_code!("Invalid token", 401)
};
-
let (mut rx, guard) = {
let users = Arc::clone(ws_users());
-
// Add a channel to send messages to this client to the map
let entry_uuid = uuid::Uuid::new_v4();
let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
@@ -134,7 +132,6 @@ fn websockets_hub<'r>(
// Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
(rx, WSEntryMapGuard::new(users, claims.sub, entry_uuid))
};
-
Ok({
rocket_ws::Stream! { ws => {
let mut ws_copy = ws;
@@ -152,7 +149,6 @@ fn websockets_hub<'r>(
// We should receive an initial message with the protocol and version, and we will reply to it
Message::Text(ref message) => {
let msg = message.strip_suffix(char::from(RECORD_SEPARATOR)).unwrap_or(message);
-
if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) {
yield Message::binary(INITIAL_RESPONSE);
continue;
@@ -168,14 +164,12 @@ fn websockets_hub<'r>(
_ => break,
}
}
-
res = rx.recv() => {
match res {
Some(res) => yield res,
None => break,
}
}
-
_ = interval.tick() => yield Message::Ping(create_ping())
}
}
@@ -191,15 +185,12 @@ fn anonymous_websockets_hub<'r>(
) -> Result<rocket_ws::Stream!['r], Error> {
let (mut rx, guard) = {
let subscriptions = Arc::clone(ws_anonymous_subscriptions());
-
// Add a channel to send messages to this client to the map
let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100);
subscriptions.map.insert(token.clone(), tx);
-
// Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map
(rx, WSAnonymousEntryMapGuard::new(subscriptions, token))
};
-
Ok({
rocket_ws::Stream! { ws => {
let mut ws_copy = ws;
@@ -232,28 +223,22 @@ fn anonymous_websockets_hub<'r>(
_ => break,
}
}
-
res = rx.recv() => {
match res {
Some(res) => yield res,
None => break,
}
}
-
_ = interval.tick() => yield Message::Ping(create_ping())
}
}
}}
})
}
-
-//
-// Websockets server
-//
-fn serialize(val: Value) -> Vec<u8> {
+fn serialize(val: &Value) -> Vec<u8> {
use rmpv::encode::write_value;
let mut buf = Vec::new();
- write_value(&mut buf, &val).expect("Error encoding MsgPack");
+ write_value(&mut buf, val).expect("Error encoding MsgPack");
// Add size bytes at the start
// Extracted from BinaryMessageFormat.js
let mut size: usize = buf.len();
@@ -329,7 +314,6 @@ impl WebSocketUsers {
ut,
None,
);
-
self.send_update(&user.uuid, &data).await;
}
@@ -342,7 +326,6 @@ impl WebSocketUsers {
UpdateType::LogOut,
acting_device_uuid.clone(),
);
-
self.send_update(&user.uuid, &data).await;
}
@@ -361,7 +344,6 @@ impl WebSocketUsers {
ut,
Some(acting_device_uuid.into()),
);
-
self.send_update(&folder.user_uuid, &data).await;
}
@@ -511,7 +493,7 @@ fn create_update(
("Payload".into(), payload.into()),
])]),
]);
- serialize(value)
+ serialize(&value)
}
fn create_anonymous_update(
@@ -520,7 +502,6 @@ fn create_anonymous_update(
user_id: String,
) -> Vec<u8> {
use rmpv::Value as V;
-
let value = V::Array(vec![
1i32.into(),
V::Map(vec![]),
@@ -532,12 +513,11 @@ fn create_anonymous_update(
("UserId".into(), user_id.into()),
])]),
]);
-
- serialize(value)
+ serialize(&value)
}
fn create_ping() -> Vec<u8> {
- serialize(Value::Array(vec![6i32.into()]))
+ serialize(&Value::Array(vec![6i32.into()]))
}
#[allow(dead_code)]
diff --git a/src/api/web.rs b/src/api/web.rs
@@ -23,7 +23,7 @@ pub fn catchers() -> Vec<Catcher> {
if config::get_config().web_vault_enabled {
catchers![not_found]
} else {
- catchers![]
+ Vec::new()
}
}
@@ -56,7 +56,6 @@ const fn web_index_head() -> EmptyResult {
#[get("/app-id.json")]
fn app_id() -> Cached<(ContentType, Json<Value>)> {
let content_type = ContentType::new("application", "fido.trusted-apps+json");
-
Cached::long(
(
content_type,
@@ -103,7 +102,6 @@ async fn attachments(uuid: SafeString, file_id: SafeString, token: String) -> Op
if claims.sub != *uuid || claims.file_id != *file_id {
return None;
}
-
NamedFile::open(
Path::new(Config::ATTACHMENTS_FOLDER)
.join(uuid)
@@ -113,17 +111,15 @@ async fn attachments(uuid: SafeString, file_id: SafeString, token: String) -> Op
.ok()
}
-// We use DbConn here to let the alive healthcheck also verify the database connection.
use crate::db::DbConn;
+#[allow(clippy::needless_pass_by_value)]
#[get("/alive")]
fn alive(_conn: DbConn) -> Json<String> {
now()
}
-#[allow(clippy::unnecessary_wraps)]
+#[allow(clippy::needless_pass_by_value, clippy::unnecessary_wraps)]
#[head("/alive")]
fn alive_head(_conn: DbConn) -> EmptyResult {
- // Avoid logging spurious "No matching routes for HEAD /alive" errors
- // due to <https://github.com/SergioBenitez/Rocket/issues/1098>.
Ok(())
}
#[get("/vw_static/<filename>", rank = 2)]
diff --git a/src/auth.rs b/src/auth.rs
@@ -194,13 +194,13 @@ pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
}
}
+#[allow(clippy::match_same_arms)]
fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Error> {
let mut validation = jsonwebtoken::Validation::new(JWT_ALGORITHM);
validation.leeway = 30; // 30 seconds
validation.validate_exp = true;
validation.validate_nbf = true;
validation.set_issuer(&[issuer]);
-
let token = token.replace(char::is_whitespace, "");
match jsonwebtoken::decode(&token, get_public_rsa_key(), &validation) {
Ok(d) => Ok(d.claims),
@@ -208,31 +208,41 @@ fn decode_jwt<T: DeserializeOwned>(token: &str, issuer: String) -> Result<T, Err
ErrorKind::InvalidToken => err!("Token is invalid"),
ErrorKind::InvalidIssuer => err!("Issuer is invalid"),
ErrorKind::ExpiredSignature => err!("Token has expired"),
+ ErrorKind::InvalidSignature
+ | ErrorKind::InvalidEcdsaKey
+ | ErrorKind::InvalidRsaKey(_)
+ | ErrorKind::RsaFailedSigning
+ | ErrorKind::InvalidAlgorithmName
+ | ErrorKind::InvalidKeyFormat
+ | ErrorKind::MissingRequiredClaim(_)
+ | ErrorKind::InvalidAudience
+ | ErrorKind::InvalidSubject
+ | ErrorKind::ImmatureSignature
+ | ErrorKind::InvalidAlgorithm
+ | ErrorKind::MissingAlgorithm
+ | ErrorKind::Base64(_)
+ | ErrorKind::Json(_)
+ | ErrorKind::Utf8(_)
+ | ErrorKind::Crypto(_) => err!("Error decoding JWT"),
_ => err!("Error decoding JWT"),
},
}
}
-
pub fn decode_login(token: &str) -> Result<LoginJwtClaims, Error> {
decode_jwt(token, get_jwt_login_issuer().to_owned())
}
-
pub fn decode_invite(token: &str) -> Result<InviteJwtClaims, Error> {
decode_jwt(token, get_jwt_invite_issuer().to_owned())
}
-
pub fn decode_delete(token: &str) -> Result<BasicJwtClaims, Error> {
decode_jwt(token, get_jwt_delete_issuer().to_owned())
}
-
pub fn decode_verify_email(token: &str) -> Result<BasicJwtClaims, Error> {
decode_jwt(token, get_jwt_verifyemail_issuer().to_owned())
}
-
pub fn decode_api_org(token: &str) -> Result<OrgApiKeyLoginJwtClaims, Error> {
decode_jwt(token, get_jwt_org_api_key_issuer().to_owned())
}
-
pub fn decode_file_download(token: &str) -> Result<FileDownloadClaims, Error> {
decode_jwt(token, get_jwt_file_download_issuer().to_owned())
}
@@ -342,7 +352,6 @@ pub struct FileDownloadClaims {
pub sub: String,
pub file_id: String,
}
-
#[derive(Debug, Serialize, Deserialize)]
pub struct BasicJwtClaims {
// Not before
@@ -354,18 +363,16 @@ pub struct BasicJwtClaims {
// Subject
pub sub: String,
}
-
-use rocket::{
- outcome::try_outcome,
- request::{FromRequest, Outcome, Request},
-};
-
use crate::db::{
models::{
Collection, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException,
},
DbConn,
};
+use rocket::{
+ outcome::try_outcome,
+ request::{FromRequest, Outcome, Request},
+};
pub struct Host {
pub host: String,
@@ -374,7 +381,6 @@ pub struct Host {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for Host {
type Error = &'static str;
-
async fn from_request(_: &'r Request<'_>) -> Outcome<Self, Self::Error> {
Outcome::Success(Self {
host: config::get_config().domain.to_string(),
@@ -391,7 +397,6 @@ pub struct ClientHeaders {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for ClientHeaders {
type Error = &'static str;
-
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let host = try_outcome!(Host::from_request(request).await).host;
let Outcome::Success(ip) = ClientIp::from_request(request).await else {
@@ -427,7 +432,6 @@ impl<'r> FromRequest<'r> for Headers {
let Outcome::Success(ip) = ClientIp::from_request(request).await else {
err_handler!("Error getting Client IP")
};
-
// Get access_token
let access_token: &str = match headers.get_one("Authorization") {
Some(a) => match a.rsplit("Bearer ").next() {
@@ -436,24 +440,19 @@ impl<'r> FromRequest<'r> for Headers {
},
None => err_handler!("No access token provided"),
};
-
// Check JWT token is valid and get device and user from it
let Ok(claims) = decode_login(access_token) else {
err_handler!("Invalid claim")
};
-
let device_uuid = claims.device;
let user_uuid = claims.sub;
-
let Outcome::Success(conn) = DbConn::from_request(request).await else {
err_handler!("Error getting DB")
};
-
let Some(device) = Device::find_by_uuid_and_user(&device_uuid, &user_uuid, &conn).await
else {
err_handler!("Invalid device id")
};
-
let Some(user) = User::find_by_uuid(&user_uuid, &conn).await else {
err_handler!("Device has no user associated")
};
@@ -511,10 +510,8 @@ pub struct OrgHeaders {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for OrgHeaders {
type Error = &'static str;
-
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(Headers::from_request(request).await);
-
// org_id is usually the second path param ("/organizations/<org_id>"),
// but there are cases where it is a query value.
// First check the path, if this is not a valid uuid, try the query values.
@@ -525,16 +522,13 @@ impl<'r> FromRequest<'r> for OrgHeaders {
url_org_id = Some(org_id);
}
}
-
if let Some(Ok(org_id)) = request.query_value::<&str>("organizationId") {
if uuid::Uuid::parse_str(org_id).is_ok() {
url_org_id = Some(org_id);
}
}
-
url_org_id
};
-
match url_org_id {
Some(org_id) => {
let user = headers.user;
@@ -557,7 +551,7 @@ impl<'r> FromRequest<'r> for OrgHeaders {
}
}
}
- _ => err_handler!("Error getting DB"),
+ Outcome::Error(_) | Outcome::Forward(_) => err_handler!("Error getting DB"),
};
Outcome::Success(Self {
host: headers.host,
@@ -593,7 +587,6 @@ pub struct AdminHeaders {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for AdminHeaders {
type Error = &'static str;
-
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(OrgHeaders::from_request(request).await);
let client_version = request
@@ -635,13 +628,11 @@ fn get_col_id(request: &Request<'_>) -> Option<String> {
return Some(col_id);
}
}
-
if let Some(Ok(col_id)) = request.query_value::<String>("collectionId") {
if uuid::Uuid::parse_str(&col_id).is_ok() {
return Some(col_id);
}
}
-
None
}
@@ -659,7 +650,6 @@ pub struct ManagerHeaders {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for ManagerHeaders {
type Error = &'static str;
-
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(OrgHeaders::from_request(request).await);
if headers.org_user_type >= UserOrgType::Manager {
@@ -714,7 +704,6 @@ pub struct ManagerHeadersLoose {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for ManagerHeadersLoose {
type Error = &'static str;
-
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(OrgHeaders::from_request(request).await);
if headers.org_user_type >= UserOrgType::Manager {
@@ -762,7 +751,6 @@ impl ManagerHeaders {
err!("You don't have access to all collections!");
}
}
-
Ok(Self {
host: h.host,
device: h.device,
@@ -783,7 +771,6 @@ pub struct OwnerHeaders {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for OwnerHeaders {
type Error = &'static str;
-
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = try_outcome!(OrgHeaders::from_request(request).await);
if headers.org_user_type == UserOrgType::Owner {
@@ -803,7 +790,7 @@ impl<'r> FromRequest<'r> for OwnerHeaders {
// Client IP address detection
//
use std::net::IpAddr;
-
+#[derive(Clone, Copy)]
pub struct ClientIp {
pub ip: IpAddr,
}
@@ -811,7 +798,7 @@ pub struct ClientIp {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for ClientIp {
type Error = ();
- #[allow(clippy::map_err_ignore)]
+ #[allow(clippy::map_err_ignore, clippy::string_slice)]
async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let ip = req.headers().get_one("X-Real-IP").and_then(|ip| {
ip.find(',')
@@ -820,11 +807,9 @@ impl<'r> FromRequest<'r> for ClientIp {
.map_err(|_| warn!("'X-Real-IP' header is malformed: {ip}"))
.ok()
});
-
let ip = ip
.or_else(|| req.remote().map(|r| r.ip()))
.unwrap_or_else(|| "0.0.0.0".parse().unwrap());
-
Outcome::Success(Self { ip })
}
}
@@ -836,7 +821,6 @@ pub struct WsAccessTokenHeader {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for WsAccessTokenHeader {
type Error = ();
-
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = request.headers();
let access_token = headers
diff --git a/src/crypto.rs b/src/crypto.rs
@@ -7,10 +7,8 @@ const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;
pub fn hash_password(secret: &[u8], salt: &[u8], iterations: u32) -> Vec<u8> {
let mut out = vec![0u8; OUTPUT_LEN]; // Initialize array with zeros
-
let iterations = NonZeroU32::new(iterations).expect("Iterations can't be zero");
pbkdf2::derive(DIGEST_ALG, iterations, salt, secret, &mut out);
-
out
}
@@ -25,7 +23,6 @@ pub fn verify_password_hash(secret: &[u8], salt: &[u8], previous: &[u8], iterati
/// Return an array holding `N` random bytes.
pub fn get_random_bytes<const N: usize>() -> [u8; N] {
use ring::rand::{SecureRandom, SystemRandom};
-
let mut array = [0; N];
SystemRandom::new()
.fill(&mut array)
@@ -35,7 +32,7 @@ pub fn get_random_bytes<const N: usize>() -> [u8; N] {
}
/// Encode random bytes using the provided function.
-pub fn encode_random_bytes<const N: usize>(e: Encoding) -> String {
+pub fn encode_random_bytes<const N: usize>(e: &Encoding) -> String {
e.encode(&get_random_bytes::<N>())
}
@@ -44,7 +41,6 @@ fn get_random_string(alphabet: &[u8], num_chars: usize) -> String {
// Ref: https://rust-lang-nursery.github.io/rust-cookbook/algorithms/randomness.html
use rand::Rng;
let mut rng = rand::thread_rng();
-
(0..num_chars)
.map(|_| {
let i = rng.gen_range(0..alphabet.len());
@@ -72,6 +68,5 @@ pub fn generate_api_key() -> String {
//
pub fn ct_eq<T: AsRef<[u8]>, U: AsRef<[u8]>>(a: T, b: U) -> bool {
use ring::constant_time::verify_slices_are_equal;
-
verify_slices_are_equal(a.as_ref(), b.as_ref()).is_ok()
}
diff --git a/src/db/mod.rs b/src/db/mod.rs
@@ -20,7 +20,6 @@ use tokio::{
mod __sqlite_schema;
// These changes are based on Rocket 0.5-rc wrapper of Diesel: https://github.com/SergioBenitez/Rocket/blob/v0.5-rc/contrib/sync_db_pools
-
// A wrapper around spawn_blocking that propagates panics to the calling code.
async fn run_blocking<F, R>(job: F) -> R
where
diff --git a/src/db/models/auth_request.rs b/src/db/models/auth_request.rs
@@ -35,12 +35,10 @@ impl AuthRequest {
public_key: String,
) -> Self {
let now = Utc::now().naive_utc();
-
Self {
uuid: crate::util::get_uuid(),
user_uuid,
organization_uuid: None,
-
request_device_identifier,
device_type,
request_ip,
@@ -57,9 +55,8 @@ impl AuthRequest {
}
}
-use crate::db::DbConn;
-
use crate::api::EmptyResult;
+use crate::db::DbConn;
use crate::error::MapResult;
impl AuthRequest {
diff --git a/src/db/models/cipher.rs b/src/db/models/cipher.rs
@@ -44,30 +44,23 @@ impl From<RepromptType> for i32 {
impl Cipher {
pub fn new(atype: i32, name: String) -> Self {
let now = Utc::now().naive_utc();
-
Self {
uuid: crate::util::get_uuid(),
created_at: now,
updated_at: now,
-
user_uuid: None,
organization_uuid: None,
-
key: None,
-
atype,
name,
-
notes: None,
fields: None,
-
data: String::new(),
password_history: None,
deleted_at: None,
reprompt: None,
}
}
-
pub fn validate_notes(cipher_data: &[CipherData]) -> EmptyResult {
let mut validation_errors = serde_json::Map::new();
for (index, cipher) in cipher_data.iter().enumerate() {
@@ -117,7 +110,6 @@ impl Cipher {
.as_ref()
.and_then(|s| serde_json::from_str(s).ok())
.unwrap_or(Value::Null);
-
// We don't need these values at all for Organizational syncs
// Skip any other database calls if this is the case and just return false.
let (read_only, hide_passwords) = if sync_type == CipherSyncType::User {
@@ -131,12 +123,10 @@ impl Cipher {
} else {
(false, false)
};
-
// Get the type_data or a default to an empty json object '{}'.
// If not passing an empty object, mobile clients will crash.
let mut type_data_json: Value = serde_json::from_str(&self.data)
.unwrap_or_else(|_| Value::Object(serde_json::Map::new()));
-
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
// Set the first element of the Uris array as Uri, this is needed several (mobile) clients.
if self.atype == 1i32 {
@@ -148,10 +138,8 @@ impl Cipher {
type_data_json["Uri"] = Value::Null;
}
}
-
// Clone the type_data and add some default value.
let mut data_json = type_data_json.clone();
-
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
// data_json should always contain the following keys with every atype
data_json["Fields"] = fields_json.clone();
@@ -166,7 +154,6 @@ impl Cipher {
} else {
Cow::from(self.get_collections(user_uuid.to_owned(), conn).await)
};
-
// There are three types of cipher response models in upstream
// Bitwarden: "cipherMini", "cipher", and "cipherDetails" (in order
// of increasing level of detail). vaultwarden currently only
@@ -188,25 +175,19 @@ impl Cipher {
// We have UseTotp set to true by default within the Organization model.
// This variable together with UsersGetPremium is used to show or hide the TOTP counter.
"OrganizationUseTotp": true,
-
// This field is specific to the cipherDetails type.
"CollectionIds": collection_ids,
-
"Name": self.name,
"Notes": self.notes,
"Fields": fields_json,
-
"Data": data_json,
-
"PasswordHistory": password_history_json,
-
// All Cipher types are included by default as null, but only the matching one will be populated
"Login": null,
"SecureNote": null,
"Card": null,
"Identity": null,
});
-
// These values are only needed for user/default syncs
// Not during an organizational sync like `get_org_details`
// Skip adding these fields in that case
@@ -268,7 +249,6 @@ impl Cipher {
pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
self.update_users_revision(conn).await;
self.updated_at = Utc::now().naive_utc();
-
db_run! { conn:
{
match diesel::replace_into(ciphers::table)
@@ -292,11 +272,9 @@ impl Cipher {
pub async fn delete(&self, conn: &DbConn) -> EmptyResult {
self.update_users_revision(conn).await;
-
FolderCipher::delete_all_by_cipher(&self.uuid, conn).await?;
CollectionCipher::delete_all_by_cipher(&self.uuid, conn).await?;
Favorite::delete_all_by_cipher(&self.uuid, conn).await?;
-
db_run! { conn: {
diesel::delete(ciphers::table.filter(ciphers::uuid.eq(&self.uuid)))
.execute(conn)
@@ -326,15 +304,12 @@ impl Cipher {
conn: &DbConn,
) -> EmptyResult {
User::update_uuid_revision(user_uuid, conn).await;
-
match (self.get_folder_uuid(user_uuid, conn).await, folder_uuid) {
// No changes
(None, None) => Ok(()),
(Some(ref old), Some(ref new)) if old == new => Ok(()),
-
// Add to folder
(None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(conn).await,
-
// Remove from folder
(Some(old), None) => {
match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await {
@@ -342,7 +317,6 @@ impl Cipher {
None => err!("Couldn't move from previous folder"),
}
}
-
// Move to another folder
(Some(old), Some(new)) => {
if let Some(old) =
@@ -354,7 +328,6 @@ impl Cipher {
}
}
}
-
/// Returns whether this cipher is directly owned by the user.
fn is_owned_by_user(&self, user_uuid: &str) -> bool {
self.user_uuid.is_some() && self.user_uuid.as_ref().unwrap() == user_uuid
@@ -439,7 +412,6 @@ impl Cipher {
read_only &= tup.0;
hide_passwords &= tup.1;
}
-
Some((read_only, hide_passwords))
}
@@ -549,25 +521,21 @@ impl Cipher {
.or_filter(users_organizations::access_all.eq(true)) // access_all in org
.or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection
.into_boxed();
-
if !visible_only {
query = query.or_filter(
users_organizations::atype.le(i32::from(UserOrgType::Admin)) // Org admin/owner
);
}
-
query
.select(ciphers::all_columns)
.distinct()
.load::<CipherDb>(conn).expect("Error loading ciphers").from_db()
}}
}
-
// Find all ciphers visible to the specified user.
pub async fn find_by_user_visible(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
Self::find_by_user(user_uuid, true, conn).await
}
-
// Find all ciphers directly owned by the specified user.
pub async fn find_owned_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! {conn: {
diff --git a/src/db/models/collection.rs b/src/db/models/collection.rs
@@ -40,7 +40,6 @@ impl Collection {
name,
external_id: None,
};
-
new_model.set_external_id(external_id);
new_model
}
@@ -91,7 +90,6 @@ impl Collection {
self.hide_passwords_for_user(user_uuid, conn).await,
)
};
-
let mut json_object = self.to_json();
json_object["Object"] = json!("collectionDetails");
json_object["ReadOnly"] = json!(read_only);
@@ -100,16 +98,14 @@ impl Collection {
}
}
-use crate::db::DbConn;
-
use crate::api::EmptyResult;
+use crate::db::DbConn;
use crate::error::MapResult;
/// Database methods
impl Collection {
pub async fn save(&self, conn: &DbConn) -> EmptyResult {
self.update_users_revision(conn).await;
-
db_run! { conn:
{
match diesel::replace_into(collections::table)
@@ -197,7 +193,7 @@ impl Collection {
// Check if a user has access to a specific collection
// FIXME: This needs to be reviewed. The query used by `find_by_user_uuid` could be adjusted to filter when needed.
- // For now this is a good solution without making to much changes.
+ // For now this is a good solution without making too much changes.
pub async fn has_access_by_collection_and_user_uuid(
collection_uuid: &str,
user_uuid: &str,
@@ -379,7 +375,6 @@ impl CollectionUser {
conn: &DbConn,
) -> EmptyResult {
User::update_uuid_revision(user_uuid, conn).await;
-
db_run! { conn:
{
match diesel::replace_into(users_collections::table)
@@ -414,7 +409,6 @@ impl CollectionUser {
pub async fn delete(self, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(&self.user_uuid, conn).await;
-
db_run! { conn: {
diesel::delete(
users_collections::table
@@ -483,7 +477,6 @@ impl CollectionUser {
for collection in &Self::find_by_collection(collection_uuid, conn).await {
User::update_uuid_revision(&collection.user_uuid, conn).await;
}
-
db_run! { conn: {
diesel::delete(users_collections::table.filter(users_collections::collection_uuid.eq(collection_uuid)))
.execute(conn)
@@ -498,7 +491,6 @@ impl CollectionUser {
) -> EmptyResult {
let collectionusers =
Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await;
-
db_run! { conn: {
for user in collectionusers {
diesel::delete(users_collections::table.filter(
@@ -517,7 +509,6 @@ impl CollectionUser {
impl CollectionCipher {
pub async fn save(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
Self::update_users_revision(collection_uuid, conn).await;
-
db_run! { conn:
{
// Not checking for ForeignKey Constraints here.
diff --git a/src/db/models/device.rs b/src/db/models/device.rs
@@ -25,16 +25,13 @@ db_object! {
impl Device {
pub fn new(uuid: String, user_uuid: String, name: String, atype: i32) -> Self {
let now = Utc::now().naive_utc();
-
Self {
uuid,
created_at: now,
updated_at: now,
-
user_uuid,
name,
atype,
-
push_uuid: None,
push_token: None,
refresh_token: String::new(),
@@ -49,19 +46,17 @@ impl Device {
pub fn refresh_tokens(
&mut self,
user: &super::User,
- orgs: Vec<super::UserOrganization>,
+ orgs: &[super::UserOrganization],
scope: Vec<String>,
) -> (String, i64) {
// If there is no refresh token, we create one
if self.refresh_token.is_empty() {
use data_encoding::BASE64URL;
- self.refresh_token = crypto::encode_random_bytes::<64>(BASE64URL);
+ self.refresh_token = crypto::encode_random_bytes::<64>(&BASE64URL);
}
-
// Update the expiration of the device and the last update date
let time_now = Utc::now().naive_utc();
self.updated_at = time_now;
-
let orgowner: Vec<_> = orgs
.iter()
.filter(|o| o.atype == 0i32)
@@ -82,7 +77,6 @@ impl Device {
.filter(|o| o.atype == 3i32)
.map(|o| o.org_uuid.clone())
.collect();
-
// Create the JWT claims struct, to send to the client
use crate::auth::{self, encode_jwt, LoginJwtClaims};
let claims = LoginJwtClaims {
@@ -113,16 +107,14 @@ impl Device {
}
}
-use crate::db::DbConn;
-
use crate::api::EmptyResult;
+use crate::db::DbConn;
use crate::error::MapResult;
/// Database methods
impl Device {
pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
self.updated_at = Utc::now().naive_utc();
-
db_run! { conn:
{
crate::util::retry(
diff --git a/src/db/models/favorite.rs b/src/db/models/favorite.rs
@@ -20,7 +20,6 @@ impl Favorite {
.filter(favorites::cipher_uuid.eq(cipher_uuid))
.filter(favorites::user_uuid.eq(user_uuid))
.count();
-
query.first::<i64>(conn).ok().unwrap_or(0) != 0
}}
}
diff --git a/src/db/models/folder.rs b/src/db/models/folder.rs
@@ -36,10 +36,8 @@ impl Folder {
name,
}
}
-
pub fn to_json(&self) -> Value {
use crate::util::format_date;
-
json!({
"Id": self.uuid,
"RevisionDate": format_date(&self.updated_at),
@@ -67,7 +65,6 @@ impl Folder {
pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(&self.user_uuid, conn).await;
self.updated_at = Utc::now().naive_utc();
-
db_run! { conn:
{
match diesel::replace_into(folders::table)
@@ -92,7 +89,6 @@ impl Folder {
pub async fn delete(&self, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(&self.user_uuid, conn).await;
FolderCipher::delete_all_by_folder(&self.uuid, conn).await?;
-
db_run! { conn: {
diesel::delete(folders::table.filter(folders::uuid.eq(&self.uuid)))
.execute(conn)
diff --git a/src/db/models/org_policy.rs b/src/db/models/org_policy.rs
@@ -205,7 +205,6 @@ impl OrgPolicy {
if exclude_org_uuid.is_some() && exclude_org_uuid.unwrap() == policy.org_uuid {
continue;
}
-
if let Some(user) =
UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await
{
@@ -246,7 +245,6 @@ impl OrgPolicy {
{
return Err(OrgPolicyErr::SingleOrgEnforced);
}
-
Ok(())
}
@@ -261,7 +259,6 @@ impl OrgPolicy {
}
None => return false,
}
-
false
}
}
diff --git a/src/db/models/organization.rs b/src/db/models/organization.rs
@@ -191,7 +191,6 @@ impl Organization {
"Identifier": null, // not supported by us
"Name": self.name,
"Seats": 10i32, // The value doesn't matter, we don't check server-side
- // "MaxAutoscaleSeats": null, // The value doesn't matter, we don't check server-side
"MaxCollections": 10i32, // The value doesn't matter, we don't check server-side
"MaxStorageGb": 10i32, // The value doesn't matter, we don't check server-side
"Use2fa": true,
@@ -200,9 +199,7 @@ impl Organization {
"UseGroups": false,
"UseTotp": true,
"UsePolicies": true,
- // "UseScim": false, // Not supported (Not AGPLv3 Licensed)
"UseSso": false, // Not supported
- // "UseKeyConnector": false, // Not supported
"SelfHost": true,
"UseApi": true,
"HasPublicAndPrivateKeys": self.private_key.is_some() && self.public_key.is_some(),
@@ -231,10 +228,8 @@ impl UserOrganization {
pub fn new(user_uuid: String, org_uuid: String) -> Self {
Self {
uuid: crate::util::get_uuid(),
-
user_uuid,
org_uuid,
-
access_all: false,
akey: String::new(),
status: i32::from(UserOrgStatus::Accepted),
@@ -286,14 +281,12 @@ impl OrganizationApiKey {
pub fn new(org_uuid: String, api_key: String) -> Self {
Self {
uuid: crate::util::get_uuid(),
-
org_uuid,
atype: 0, // Type 0 is the default and only type we support currently
api_key,
revision_date: Utc::now().naive_utc(),
}
}
-
pub fn check_valid_api_key(&self, api_key: &str) -> bool {
crate::crypto::ct_eq(&self.api_key, api_key)
}
@@ -309,7 +302,6 @@ impl Organization {
for user_org in &UserOrganization::find_by_org(&self.uuid, conn).await {
User::update_uuid_revision(&user_org.user_uuid, conn).await;
}
-
db_run! { conn:
{
match diesel::replace_into(organizations::table)
@@ -327,19 +319,16 @@ impl Organization {
}
Err(e) => Err(e.into()),
}.map_res("Error saving organization")
-
}
}
}
pub async fn delete(self, conn: &DbConn) -> EmptyResult {
use super::{Cipher, Collection};
-
Cipher::delete_all_by_organization(&self.uuid, conn).await?;
Collection::delete_all_by_organization(&self.uuid, conn).await?;
UserOrganization::delete_all_by_organization(&self.uuid, conn).await?;
OrgPolicy::delete_all_by_organization(&self.uuid, conn).await?;
-
db_run! { conn: {
diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))
.execute(conn)
@@ -362,7 +351,6 @@ impl UserOrganization {
let org = Organization::find_by_uuid(&self.org_uuid, conn)
.await
.unwrap();
-
// https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs
json!({
"Id": self.org_uuid,
@@ -376,7 +364,6 @@ impl UserOrganization {
"UseEvents": false,
"UseGroups": false,
"UseTotp": true,
- // "UseScim": false, // Not supported (Not AGPLv3 Licensed)
"UsePolicies": true,
"UseApi": true,
"SelfHost": true,
@@ -398,7 +385,6 @@ impl UserOrganization {
}
pub async fn to_json_user_details(&self, include_collections: bool, conn: &DbConn) -> Value {
let user = User::find_by_uuid(&self.user_uuid, conn).await.unwrap();
-
// Because BitWarden want the status to be -1 for revoked users we need to catch that here.
// We subtract/add a number so we can restore/activate the user to it's previous state again.
let status = if self.status < i32::from(UserOrgStatus::Revoked) {
@@ -437,13 +423,11 @@ impl UserOrganization {
"ExternalId": self.external_id,
"Groups": groups,
"Collections": collections,
-
"Status": status,
"Type": self.atype,
"AccessAll": self.access_all,
"TwoFactorEnabled": twofactor_enabled,
"ResetPasswordEnrolled": self.reset_password_key.is_some(),
-
"Object": "organizationUserUserDetails",
})
}
@@ -458,7 +442,6 @@ impl UserOrganization {
pub async fn save(&self, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(&self.user_uuid, conn).await;
-
db_run! { conn:
{
match diesel::replace_into(users_organizations::table)
@@ -482,9 +465,7 @@ impl UserOrganization {
pub async fn delete(self, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(&self.user_uuid, conn).await;
-
CollectionUser::delete_all_by_user_and_org(&self.user_uuid, &self.org_uuid, conn).await?;
-
db_run! { conn: {
diesel::delete(users_organizations::table.filter(users_organizations::uuid.eq(self.uuid)))
.execute(conn)
diff --git a/src/db/models/user.rs b/src/db/models/user.rs
@@ -1,8 +1,7 @@
-use chrono::{Duration, NaiveDateTime, Utc};
-use serde_json::Value;
-
use crate::config;
use crate::crypto;
+use chrono::{Duration, NaiveDateTime, Utc};
+use serde_json::Value;
db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
@@ -87,7 +86,7 @@ impl User {
}
pub const CLIENT_KDF_ITER_DEFAULT: i32 = 600_000i32;
- pub fn new(email: String) -> Self {
+ pub fn new(email: &str) -> Self {
let now = Utc::now().naive_utc();
let email = email.to_lowercase();
Self {
@@ -169,15 +168,12 @@ impl User {
u32::try_from(self.password_iterations)
.expect("underflow converting password iterations into a u32"),
);
-
if let Some(route) = allow_next_route {
self.set_stamp_exception(route);
}
-
if let Some(new_key) = new_key {
self.akey = new_key;
}
-
if reset_security_stamp {
self.reset_security_stamp();
}
@@ -225,16 +221,13 @@ impl User {
for c in UserOrganization::find_confirmed_by_user(&self.uuid, conn).await {
orgs_json.push(c.to_json(conn).await);
}
-
let twofactor_enabled = !TwoFactor::find_by_user(&self.uuid, conn).await.is_empty();
-
// TODO: Might want to save the status field in the DB
let status = if self.password_hash.is_empty() {
UserStatus::Invited
} else {
UserStatus::Enabled
};
-
json!({
"_Status": i32::from(status),
"Id": self.uuid,
@@ -261,9 +254,7 @@ impl User {
if self.email.trim().is_empty() {
err!("User email can't be empty")
}
-
self.updated_at = Utc::now().naive_utc();
-
db_run! {conn:
{
match diesel::replace_into(users::table)
@@ -305,7 +296,6 @@ impl User {
Folder::delete_all_by_user(&self.uuid, conn).await?;
Device::delete_all_by_user(&self.uuid, conn).await?;
TwoFactor::delete_all_by_user(&self.uuid, conn).await?;
-
db_run! {conn: {
diesel::delete(users::table.filter(users::uuid.eq(self.uuid)))
.execute(conn)
@@ -321,7 +311,6 @@ impl User {
pub async fn update_all_revisions(conn: &DbConn) -> EmptyResult {
let updated_at = Utc::now().naive_utc();
-
db_run! {conn: {
crate::util::retry(|| {
diesel::update(users::table)
@@ -334,7 +323,6 @@ impl User {
pub async fn update_revision(&mut self, conn: &DbConn) -> EmptyResult {
self.updated_at = Utc::now().naive_utc();
-
Self::_update_revision(&self.uuid, &self.updated_at, conn).await
}
diff --git a/src/error.rs b/src/error.rs
@@ -131,7 +131,18 @@ impl std::fmt::Debug for Error {
}
}
ErrorKind::Json(_) => write!(f, "{}", self.message),
- _ => unreachable!(),
+ ErrorKind::Db(_)
+ | ErrorKind::R2d2(_)
+ | ErrorKind::Serde(_)
+ | ErrorKind::JWt(_)
+ | ErrorKind::Io(_)
+ | ErrorKind::Time(_)
+ | ErrorKind::Regex(_)
+ | ErrorKind::OpenSSL(_)
+ | ErrorKind::Rocket(_)
+ | ErrorKind::DieselCon(_)
+ | ErrorKind::Webauthn(_)
+ | ErrorKind::WebSocket(_) => unreachable!(),
},
}
}
diff --git a/src/main.rs b/src/main.rs
@@ -32,7 +32,6 @@
clippy::multiple_crate_versions,
clippy::multiple_inherent_impl,
clippy::multiple_unsafe_ops_per_block,
- clippy::needless_pass_by_value,
clippy::no_effect_underscore_binding,
clippy::panic,
clippy::panic_in_result_fn,
@@ -48,14 +47,12 @@
clippy::single_char_lifetime_names,
clippy::std_instead_of_alloc,
clippy::std_instead_of_core,
- clippy::string_slice,
clippy::too_many_lines,
clippy::unreachable,
clippy::unseparated_literal_suffix,
clippy::unwrap_in_result,
clippy::unwrap_used,
- clippy::used_underscore_binding,
- clippy::wildcard_enum_match_arm
+ clippy::used_underscore_binding
)]
// The recursion_limit is mainly triggered by the json!() macro.
// The more key/value pairs there are the more recursion occurs.
@@ -85,10 +82,10 @@ mod util;
use alloc::sync::Arc;
use config::Config;
pub use error::{Error, MapResult};
-use priv_sep::unveil_create_read_write;
use std::env;
-use std::{fs::create_dir_all, path::Path, process::exit};
+use std::{fs, path::Path, process};
use tokio::runtime::Builder;
+pub const VERSION: &str = env!("CARGO_PKG_VERSION");
fn main() -> Result<(), Error> {
let mut promises = priv_sep::pledge_init()?;
@@ -127,7 +124,7 @@ fn main() -> Result<(), Error> {
#[inline]
fn static_init() {
config::init_config();
- unveil_create_read_write(Config::DATA_FOLDER).unwrap_or_else(|_| {
+ priv_sep::unveil_create_read_write(Config::DATA_FOLDER).unwrap_or_else(|_| {
panic!(
"unable to unveil(2) {} with create, read, and write permissions",
Config::DATA_FOLDER
@@ -137,33 +134,26 @@ fn static_init() {
api::init_ws_users();
api::init_ws_anonymous_subscriptions();
}
-pub const VERSION: &str = env!("CARGO_PKG_VERSION");
fn create_dir(path: &str, description: &str) {
- // Try to create the specified dir, if it doesn't already exist.
- let err_msg = format!("Error creating {description} directory '{path}'");
- create_dir_all(path).expect(&err_msg);
+ fs::create_dir_all(path)
+ .unwrap_or_else(|_| panic!("Error creating {description} directory '{path}'"));
}
#[allow(clippy::exit)]
fn check_data_folder() {
- let data_folder = Config::DATA_FOLDER;
- let path = Path::new(data_folder);
- if !path.exists() {
- exit(1);
- }
- if !path.is_dir() {
- exit(1);
+ if !Path::new(Config::DATA_FOLDER).is_dir() {
+ process::exit(1);
}
}
#[allow(clippy::exit)]
fn check_web_vault() {
- if !config::get_config().web_vault_enabled {
- return;
- }
- let index_path = Path::new(Config::WEB_VAULT_FOLDER).join("index.html");
- if !index_path.exists() {
- exit(1);
+ if config::get_config().web_vault_enabled
+ && !Path::new(Config::WEB_VAULT_FOLDER)
+ .join("index.html")
+ .exists()
+ {
+ process::exit(1);
}
}
#[allow(clippy::exit)]
@@ -175,7 +165,7 @@ async fn create_db_pool() -> db::DbPool {
.await)
.map_or_else(
|_| {
- exit(1);
+ process::exit(1);
},
|p| p,
)
diff --git a/src/util.rs b/src/util.rs
@@ -49,13 +49,11 @@ impl Fairing for AppHeaders {
(_, _) => (),
}
}
-
res.set_raw_header("Permissions-Policy", "accelerometer=(), ambient-light-sensor=(), autoplay=(), battery=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), execution-while-not-rendered=(), execution-while-out-of-viewport=(), fullscreen=(), geolocation=(), gyroscope=(), keyboard-map=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), screen-wake-lock=(), sync-xhr=(), usb=(), web-share=(), xr-spatial-tracking=()");
res.set_raw_header("Referrer-Policy", "same-origin");
res.set_raw_header("X-Content-Type-Options", "nosniff");
// Obsolete in modern browsers, unsafe (XS-Leak), and largely replaced by CSP
res.set_raw_header("X-XSS-Protection", "0");
-
// Do not send the Content-Security-Policy (CSP) Header and X-Frame-Options for the *-connector.html files.
// This can cause issues when some MFA requests needs to open a popup or page within the clients like WebAuthn.
// This is the same behavior as upstream Bitwarden.
@@ -106,7 +104,6 @@ impl Fairing for AppHeaders {
res.set_raw_header("Content-Security-Policy", csp);
res.set_raw_header("X-Frame-Options", "SAMEORIGIN");
}
-
// Disable cache unless otherwise specified
if !res.headers().contains("cache-control") {
res.set_raw_header("Cache-Control", "no-cache, no-store, max-age=0");
@@ -122,7 +119,6 @@ impl Cors {
.get_one(name)
.map_or_else(String::new, std::string::ToString::to_string)
}
-
// Check a request's `Origin` header against the list of allowed origins.
// If a match exists, return it. Otherwise, return None.
fn get_allowed_origin(headers: &HeaderMap<'_>) -> Option<String> {
@@ -148,16 +144,13 @@ impl Fairing for Cors {
async fn on_response<'r>(&self, request: &'r Request<'_>, response: &mut Response<'r>) {
let req_headers = request.headers();
-
if let Some(origin) = Self::get_allowed_origin(req_headers) {
response.set_header(Header::new("Access-Control-Allow-Origin", origin));
}
-
// Preflight request
if request.method() == Method::Options {
let req_allow_headers = Self::get_header(req_headers, "Access-Control-Request-Headers");
let req_allow_method = Self::get_header(req_headers, "Access-Control-Request-Method");
-
response.set_header(Header::new(
"Access-Control-Allow-Methods",
req_allow_method,
@@ -188,7 +181,6 @@ impl<R> Cached<R> {
ttl: 604_800, // 7 days
}
}
-
pub const fn short(response: R, is_immutable: bool) -> Self {
Self {
response,
@@ -196,7 +188,6 @@ impl<R> Cached<R> {
ttl: 600, // 10 minutes
}
}
-
pub const fn ttl(response: R, ttl: u64, is_immutable: bool) -> Self {
Self {
response,
@@ -208,14 +199,12 @@ impl<R> Cached<R> {
impl<'r, R: 'r + Responder<'r, 'static> + Send> Responder<'r, 'static> for Cached<R> {
fn respond_to(self, request: &'r Request<'_>) -> response::Result<'static> {
let mut res = self.response.respond_to(request)?;
-
let cache_control_header = if self.is_immutable {
format!("public, immutable, max-age={}", self.ttl)
} else {
format!("public, max-age={}", self.ttl)
};
res.set_raw_header("Cache-Control", cache_control_header);
-
let time_now = chrono::Local::now();
let expiry_time = time_now
.checked_add_signed(chrono::Duration::seconds(self.ttl.try_into().unwrap()))
@@ -234,7 +223,6 @@ impl std::fmt::Display for SafeString {
impl Deref for SafeString {
type Target = String;
-
fn deref(&self) -> &Self::Target {
&self.0
}
@@ -249,7 +237,6 @@ impl AsRef<Path> for SafeString {
impl<'r> FromParam<'r> for SafeString {
type Error = ();
-
#[inline]
fn from_param(param: &'r str) -> Result<Self, Self::Error> {
if param
@@ -267,13 +254,7 @@ use std::path::Path;
pub fn get_uuid() -> String {
uuid::Uuid::new_v4().to_string()
}
-
-//
-// String util methods
-//
-
use std::str::FromStr;
-
#[inline]
fn upcase_first(s: &str) -> String {
let mut c = s.chars();
@@ -283,7 +264,6 @@ fn upcase_first(s: &str) -> String {
val
})
}
-
#[inline]
fn lcase_first(s: &str) -> String {
let mut c = s.chars();
@@ -293,7 +273,6 @@ fn lcase_first(s: &str) -> String {
val
})
}
-
pub fn try_parse_string<S, T>(string: Option<S>) -> Option<T>
where
S: AsRef<str>,
@@ -306,16 +285,13 @@ where
}
}
use chrono::{DateTime, Local, NaiveDateTime};
-
// Format used by Bitwarden API
const DATETIME_FORMAT: &str = "%Y-%m-%dT%H:%M:%S%.6fZ";
-
/// Formats a UTC-offset `NaiveDateTime` in the format used by Bitwarden API
/// responses with "date" fields (`CreationDate`, `RevisionDate`, etc.).
pub fn format_date(dt: &NaiveDateTime) -> String {
dt.format(DATETIME_FORMAT).to_string()
}
-
/// Formats a `DateTime<Local>` as required for HTTP
///
/// [http](https://httpwg.org/specs/rfc7231.html#http.date)
@@ -327,15 +303,9 @@ fn format_datetime_http(dt: &DateTime<Local>) -> String {
// offset (which would always be 0 in UTC anyway)
expiry_time.to_rfc2822().replace("+0000", "GMT")
}
-//
-// Deserialization methods
-//
-
-use std::fmt;
-
use serde::de::{self, DeserializeOwned, Deserializer, MapAccess, SeqAccess, Visitor};
use serde_json::{self, Value};
-
+use std::fmt;
type JsonMap = serde_json::Map<String, Value>;
#[derive(Serialize, Deserialize)]
@@ -359,11 +329,9 @@ struct UpCaseVisitor;
impl<'de> Visitor<'de> for UpCaseVisitor {
type Value = Value;
-
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("an object or an array")
}
-
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
@@ -376,7 +344,6 @@ impl<'de> Visitor<'de> for UpCaseVisitor {
Ok(Value::Object(result_map))
}
-
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
@@ -394,7 +361,6 @@ impl<'de> Visitor<'de> for UpCaseVisitor {
fn upcase_value(value: Value) -> Value {
if let Value::Object(map) = value {
let mut new_value = Value::Object(serde_json::Map::new());
-
for (key, val) in map {
let processed_key = _process_key(&key);
new_value[processed_key] = upcase_value(val);
@@ -403,7 +369,6 @@ fn upcase_value(value: Value) -> Value {
} else if let Value::Array(array) = value {
// Initialize array with null values
let mut new_value = Value::Array(vec![Value::Null; array.len()]);
-
for (index, val) in array.into_iter().enumerate() {
new_value[index] = upcase_value(val);
}
@@ -422,10 +387,6 @@ fn _process_key(key: &str) -> String {
}
}
-//
-// Retry methods
-//
-
pub fn retry<F, T, E>(mut func: F, max_tries: u32) -> Result<T, E>
where
F: FnMut() -> Result<T, E>,
@@ -459,9 +420,7 @@ where
if tries >= max_tries && max_tries > 0 {
return Err(e);
}
-
warn!("Can't connect to database, retrying: {:?}", e);
-
sleep(Duration::from_millis(1_000)).await;
}
}
@@ -471,13 +430,11 @@ pub fn convert_json_key_lcase_first(src_json: Value) -> Value {
match src_json {
Value::Array(elm) => {
let mut new_array: Vec<Value> = Vec::with_capacity(elm.len());
-
for obj in elm {
new_array.push(convert_json_key_lcase_first(obj));
}
Value::Array(new_array)
}
-
Value::Object(obj) => {
let mut json_map = JsonMap::new();
for tup in obj {
@@ -492,7 +449,6 @@ pub fn convert_json_key_lcase_first(src_json: Value) -> Value {
for inner_obj in elm {
inner_array.push(convert_json_key_lcase_first(inner_obj.clone()));
}
-
json_map.insert(lcase_first(key.as_str()), Value::Array(inner_array));
}
(key, value) => {
@@ -500,9 +456,8 @@ pub fn convert_json_key_lcase_first(src_json: Value) -> Value {
}
}
}
-
Value::Object(json_map)
}
- value => value,
+ value @ (Value::Null | Value::Bool(_) | Value::Number(_) | Value::String(_)) => value,
}
}