commit fd9bc91a387b8779e64e0dc82a5ea39f82e227c9
parent a1c6197b1d5774f2ef7debebff7a19678805bd51
Author: Zack Newman <zack@philomathiclife.com>
Date: Thu, 10 Oct 2024 13:52:58 -0600
upstream changes. convert some PascalCase into camelCase
Diffstat:
14 files changed, 275 insertions(+), 121 deletions(-)
diff --git a/Cargo.toml b/Cargo.toml
@@ -22,18 +22,18 @@ priv_sep = { version = "2.0.0", default-features = false, features = ["openbsd"]
[dependencies]
chrono = { version = "0.4.38", default-features = false, features = ["serde"] }
data-encoding = { version = "2.6.0", default-features = false }
-diesel = { version = "2.2.2", default-features = false, features = ["32-column-tables", "chrono", "r2d2", "sqlite"] }
+diesel = { version = "2.2.4", default-features = false, features = ["32-column-tables", "chrono", "r2d2", "sqlite"] }
jsonwebtoken = { version = "9.3.0", default-features = false, features = ["use_pem"] }
-libsqlite3-sys = { version = "0.29.0", default-features = false, features = ["bundled"] }
+libsqlite3-sys = { version = "0.30.1", default-features = false, features = ["bundled"] }
openssl = { version = "0.10.66", default-features = false }
paste = { version = "1.0.15", default-features = false }
rand = { version = "0.8.5", default-features = false, features = ["small_rng"] }
ring = { version = "0.17.8", default-features = false }
rocket = { version = "0.5.1", default-features = false, features = ["json", "tls"] }
semver = { version = "1.0.23", default-features = false }
-serde = { version = "1.0.208", default-features = false }
-serde_json = { version = "1.0.125", default-features = false }
-tokio = { version = "1.39.3", default-features = false }
+serde = { version = "1.0.210", default-features = false }
+serde_json = { version = "1.0.128", default-features = false }
+tokio = { version = "1.40.0", default-features = false }
toml = { version = "0.8.19", default-features = false, features = ["parse"] }
totp-lite = { version = "2.0.1", default-features = false }
url = { version = "2.5.2", default-features = false }
diff --git a/src/api/core/accounts.rs b/src/api/core/accounts.rs
@@ -323,7 +323,7 @@ async fn post_rotatekey(data: Json<KeyData>, headers: Headers, conn: DbConn) ->
// Bitwarden does not process the import if even one item is invalid.
// Since we check the length of the encrypted notes, we need to do that here to pre-validate it.
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
- Cipher::validate_notes(&key_data.ciphers)?;
+ Cipher::validate_cipher_data(&key_data.ciphers)?;
let user_uuid = &headers.user.uuid;
// Update folder data
for folder_data in key_data.folders {
diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs
@@ -226,7 +226,7 @@ pub struct CipherData {
identity: Option<Value>,
favorite: Option<bool>,
reprompt: Option<i32>,
- password_history: Option<Value>,
+ pub password_history: Option<Value>,
// These are used during key rotation
// 'Attachments' is unused; it contains a map of {id: filename}
#[allow(dead_code)]
@@ -283,10 +283,6 @@ async fn post_ciphers_create(
if data.cipher.organization_id.is_some() && data.collection_ids.is_empty() {
err!("You must select at least one collection.");
}
- // reverse sanity check to prevent corruptions
- if !data.collection_ids.is_empty() && data.cipher.organization_id.is_none() {
- err!("The client has not provided an organization id!");
- }
// This check is usually only needed in update_cipher_from_data(), but we
// need it here as well to avoid creating an empty cipher in the call to
// cipher.save() below.
@@ -492,7 +488,7 @@ async fn post_ciphers_import(
// Bitwarden does not process the import if even one item is invalid.
// Since we check the length of the encrypted notes, we need to do that here to pre-validate it.
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
- Cipher::validate_notes(&data.ciphers)?;
+ Cipher::validate_cipher_data(&data.ciphers)?;
let existing_folders: Vec<String> = Folder::find_by_user(&headers.user.uuid, &conn)
.await
.into_iter()
@@ -637,6 +633,7 @@ async fn put_cipher_partial(
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct CollectionsAdminData {
+ #[serde(alias = "CollectionIds")]
collection_ids: Vec<String>,
}
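
A note on the alias above: with serde, `rename_all = "camelCase"` makes the field deserialize from "collectionIds", while `alias` additionally accepts the legacy PascalCase key still sent by older clients. A minimal sketch (struct shape from the hunk above; the harness and payloads are illustrative):

use serde::Deserialize;

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct CollectionsAdminData {
    // `rename_all` maps this field to "collectionIds"; the alias also
    // accepts the PascalCase spelling during the naming transition.
    #[serde(alias = "CollectionIds")]
    collection_ids: Vec<String>,
}

fn main() {
    let new_style: CollectionsAdminData =
        serde_json::from_str(r#"{"collectionIds":["a"]}"#).unwrap();
    let old_style: CollectionsAdminData =
        serde_json::from_str(r#"{"CollectionIds":["a"]}"#).unwrap();
    // Both spellings deserialize into the same field.
    assert_eq!(new_style.collection_ids, old_style.collection_ids);
}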
diff --git a/src/api/core/organizations.rs b/src/api/core/organizations.rs
@@ -377,7 +377,7 @@ async fn put_organization_collection_update(
async fn post_organization_collection_update(
org_id: &str,
col_id: &str,
- _headers: ManagerHeaders,
+ headers: ManagerHeaders,
data: Json<NewCollectionData>,
conn: DbConn,
) -> JsonResult {
@@ -414,7 +414,11 @@ async fn post_organization_collection_update(
)
.await?;
}
- Ok(Json(collection.to_json()))
+ Ok(Json(
+ collection
+ .to_json_details(&headers.user.uuid, None, &conn)
+ .await,
+ ))
}
#[delete("/organizations/<org_id>/collections/<col_id>/user/<org_user_id>")]
@@ -1104,7 +1108,7 @@ async fn post_org_import(
// Bitwarden does not process the import if even one item is invalid.
// Since we check the length of the encrypted notes, we need to do that here to pre-validate it.
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
- Cipher::validate_notes(&data.ciphers)?;
+ Cipher::validate_cipher_data(&data.ciphers)?;
let mut collections = Vec::new();
for coll in data.collections {
let collection = Collection::new(org_id.clone(), coll.name, coll.external_id);
diff --git a/src/api/core/two_factor/authenticator.rs b/src/api/core/two_factor/authenticator.rs
@@ -71,7 +71,14 @@ async fn activate_authenticator(
err!("Invalid key length")
}
// Validate the token provided with the key, and save new twofactor
- validate_totp_code(user.uuid, &token, key.to_uppercase(), &headers.ip, &conn).await?;
+ validate_totp_code(
+ user.uuid.as_str(),
+ &token,
+ key.to_uppercase(),
+ &headers.ip,
+ &conn,
+ )
+ .await?;
Ok(Json(json!({
"enabled": true,
"key": key,
@@ -89,7 +96,7 @@ async fn activate_authenticator_put(
}
pub async fn validate_totp_code_str(
- user_uuid: String,
+ user_uuid: &str,
totp_code: &str,
secret: String,
ip: &ClientIp,
@@ -106,7 +113,7 @@ pub async fn validate_totp_code_str(
clippy::redundant_else
)]
async fn validate_totp_code(
- user_uuid: String,
+ user_uuid: &str,
totp_code: &str,
secret: String,
ip: &ClientIp,
@@ -116,9 +123,9 @@ async fn validate_totp_code(
let Ok(decoded_secret) = BASE32.decode(secret.as_bytes()) else {
err!("Invalid TOTP secret")
};
- let mut totp = Totp::find_by_user(user_uuid.as_str(), conn)
+ let mut totp = Totp::find_by_user(user_uuid, conn)
.await?
- .unwrap_or_else(|| Totp::new(user_uuid, secret));
+ .unwrap_or_else(|| Totp::new(user_uuid.to_owned(), secret));
let current_time = chrono::Utc::now();
let current_timestamp = u64::try_from(current_time.timestamp()).expect("underflow");
let time_step = current_timestamp / 30u64;
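
For context on `current_timestamp / 30`: RFC 6238 TOTP hashes a counter derived from Unix time divided by a 30-second step, and validators typically also accept the codes of adjacent steps to tolerate clock drift. A hedged sketch of such a window check with the totp-lite crate from Cargo.toml (the one-step window and 6-digit length are illustrative, not necessarily this function's exact policy):

use totp_lite::{totp_custom, Sha1, DEFAULT_STEP};

/// True if `code` matches the current 30-second step or one step on either side.
fn totp_in_window(decoded_secret: &[u8], code: &str, current_timestamp: u64) -> bool {
    let time_step = current_timestamp / DEFAULT_STEP;
    (time_step.saturating_sub(1)..=time_step + 1).any(|step| {
        // totp_custom takes a time in seconds, so scale the step back up.
        totp_custom::<Sha1>(DEFAULT_STEP, 6, decoded_secret, step * DEFAULT_STEP) == code
    })
}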
diff --git a/src/api/core/two_factor/webauthn.rs b/src/api/core/two_factor/webauthn.rs
@@ -146,13 +146,13 @@ async fn delete_webauthn(data: Json<DeleteU2FData>, headers: Headers, conn: DbCo
})))
}
-pub async fn generate_webauthn_login(user_uuid: String, conn: &DbConn) -> JsonResult {
- let keys = WebAuthn::get_all_security_keys(user_uuid.as_str(), conn).await?;
+pub async fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult {
+ let keys = WebAuthn::get_all_security_keys(user_uuid, conn).await?;
if keys.is_empty() {
err!("No WebAuthn devices registered")
}
let (challenge, auth) = build_webauthn()?.start_securitykey_authentication(keys.as_slice())?;
- WebAuthnChallenge::Auth(WebAuthnAuth::new(user_uuid, &auth)?)
+ WebAuthnChallenge::Auth(WebAuthnAuth::new(user_uuid.to_owned(), &auth)?)
.replace(conn)
.await?;
Ok(Json(serde_json::to_value(challenge.public_key)?))
diff --git a/src/api/identity.rs b/src/api/identity.rs
@@ -6,7 +6,7 @@ use crate::{
auth::{ClientHeaders, ClientIp},
config,
db::{
- models::{Device, TwoFactorType, User},
+ models::{Device, OrgPolicy, OrgPolicyType, TwoFactorType, User},
DbConn,
},
error::{Error, MapResult},
@@ -71,25 +71,32 @@ async fn _refresh_login(data: ConnectData, conn: &DbConn) -> JsonResult {
"expires_in": expires_in,
"token_type": "Bearer",
"refresh_token": device.refresh_token,
- "scope": scope,
- "PrivateKey": user.private_key,
"Key": user.akey,
- "MasterPasswordPolicy": {
- "Object": "masterPasswordPolicy"
- },
- "ForcePasswordReset": false,
- "ResetMasterPassword": false,
+ "PrivateKey": user.private_key,
+
"Kdf": user.client_kdf_type,
"KdfIterations": user.client_kdf_iter(),
"KdfMemory": user.client_kdf_memory(),
"KdfParallelism": user.client_kdf_parallelism(),
- "UserDecryptionOptions": {
- "HasMasterPassword": true,
- "Object": "userDecryptionOptions"
- },
+ "ResetMasterPassword": false, // TODO: according to official server seems something like: user.password_hash.is_empty(), but would need testing
+ "scope": scope,
+ "unofficialServer": true,
});
Ok(Json(result))
}
+#[expect(clippy::struct_excessive_bools, reason = "upstream")]
+#[derive(Default, Deserialize, Serialize)]
+#[serde(rename_all = "camelCase")]
+struct MasterPasswordPolicy {
+ min_complexity: u8,
+ min_length: u32,
+ require_lower: bool,
+ require_upper: bool,
+ require_numbers: bool,
+ require_special: bool,
+ enforce_on_login: bool,
+}
+
#[allow(clippy::else_if_without_else)]
async fn _password_login(
data: ConnectData,
@@ -147,7 +154,7 @@ async fn _password_login(
let kdf_iter = user.client_kdf_iter();
let kdf_mem = user.client_kdf_memory();
let kdf_par = user.client_kdf_parallelism();
- let twofactor_token = twofactor_auth(user.uuid, &data, &mut device, ip, conn).await?;
+ let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, conn).await?;
// Common
// ---
// Disabled this variable, it was used to generate the JWT
@@ -156,6 +163,35 @@ async fn _password_login(
// ---
// let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await;
device.save(conn).await?;
+ // Fetch all valid Master Password Policies and merge them into one, OR-ing the booleans and taking the largest numbers, so the result is the strictest policy
+ let master_password_policies: Vec<MasterPasswordPolicy> =
+ OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(
+ &user.uuid,
+ OrgPolicyType::MasterPassword,
+ conn,
+ )
+ .await
+ .into_iter()
+ .filter_map(|p| serde_json::from_str(&p.data).ok())
+ .collect();
+
+ let master_password_policy = if master_password_policies.is_empty() {
+ json!({"object": "masterPasswordPolicy"})
+ } else {
+ let mut mpp_json = json!(master_password_policies.into_iter().reduce(|acc, policy| {
+ MasterPasswordPolicy {
+ min_complexity: acc.min_complexity.max(policy.min_complexity),
+ min_length: acc.min_length.max(policy.min_length),
+ require_lower: acc.require_lower || policy.require_lower,
+ require_upper: acc.require_upper || policy.require_upper,
+ require_numbers: acc.require_numbers || policy.require_numbers,
+ require_special: acc.require_special || policy.require_special,
+ enforce_on_login: acc.enforce_on_login || policy.enforce_on_login,
+ }
+ }));
+ mpp_json["object"] = json!("masterPasswordPolicy");
+ mpp_json
+ };
let mut result = json!({
"access_token": access_token,
"expires_in": expires_in,
@@ -169,9 +205,7 @@ async fn _password_login(
"KdfParallelism": kdf_par,
"ResetMasterPassword": false, // TODO: Same as above
"ForcePasswordReset": false,
- "MasterPasswordPolicy": {
- "object": "masterPasswordPolicy",
- },
+ "MasterPasswordPolicy": master_password_policy,
"scope": scope,
"unofficialServer": true,
"UserDecryptionOptions": {
@@ -209,13 +243,13 @@ async fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device,
}
async fn twofactor_auth(
- user_uuid: String,
+ user_uuid: &str,
data: &ConnectData,
device: &mut Device,
ip: &ClientIp,
conn: &DbConn,
) -> ApiResult<Option<String>> {
- let (authn, totp_token) = TwoFactorType::get_factors(user_uuid.as_str(), conn).await?;
+ let (authn, totp_token) = TwoFactorType::get_factors(user_uuid, conn).await?;
if authn || totp_token.is_some() {
let Some(ref token) = data.two_factor_token else {
err_json!(
@@ -269,7 +303,7 @@ async fn twofactor_auth(
.await?;
}
TwoFactorType::WebAuthn => {
- _tf::webauthn::validate_webauthn_login(user_uuid.as_str(), token, conn).await?;
+ _tf::webauthn::validate_webauthn_login(user_uuid, token, conn).await?;
}
}
device.delete_twofactor_remember();
@@ -280,13 +314,13 @@ async fn twofactor_auth(
async fn _json_err_twofactor(
authn: bool,
totp: bool,
- user_uuid: String,
+ user_uuid: &str,
conn: &DbConn,
) -> ApiResult<Value> {
use crate::api::core::two_factor;
- let auth_num = i32::from(TwoFactorType::WebAuthn);
- let totp_num = i32::from(TwoFactorType::Totp);
- let providers = [auth_num, totp_num];
+ let auth_num = i32::from(TwoFactorType::WebAuthn).to_string();
+ let totp_num = i32::from(TwoFactorType::Totp).to_string();
+ let providers = [auth_num.as_str(), totp_num.as_str()];
let mut result = json!({
"error" : "invalid_grant",
"error_description" : "Two factor required.",
@@ -298,10 +332,10 @@ async fn _json_err_twofactor(
});
if authn {
let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn).await?;
- result["TwoFactorProviders2"][auth_num.to_string()] = request.0;
+ result["TwoFactorProviders2"][auth_num] = request.0;
}
if totp {
- result["TwoFactorProviders2"][totp_num.to_string()] = Value::Null;
+ result["TwoFactorProviders2"][totp_num] = Value::Null;
}
Ok(result)
}
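
On the policy merge in `_password_login` above: `reduce` returns `None` for an empty iterator, which is why the empty case short-circuits to the bare `{"object": "masterPasswordPolicy"}` value. The fold itself, isolated (same `MasterPasswordPolicy` struct as defined above):

// Merging keeps the strictest of each field: booleans OR together and
// numeric limits take the maximum, so the merged policy is at least as
// strict as every org policy that produced it.
fn merge_all(policies: Vec<MasterPasswordPolicy>) -> Option<MasterPasswordPolicy> {
    policies.into_iter().reduce(|acc, p| MasterPasswordPolicy {
        min_complexity: acc.min_complexity.max(p.min_complexity),
        min_length: acc.min_length.max(p.min_length),
        require_lower: acc.require_lower || p.require_lower,
        require_upper: acc.require_upper || p.require_upper,
        require_numbers: acc.require_numbers || p.require_numbers,
        require_special: acc.require_special || p.require_special,
        enforce_on_login: acc.enforce_on_login || p.enforce_on_login,
    })
}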
diff --git a/src/db/models/cipher.rs b/src/db/models/cipher.rs
@@ -2,8 +2,8 @@ use super::{
CollectionCipher, Favorite, FolderCipher, User, UserOrgStatus, UserOrgType, UserOrganization,
};
use crate::api::core::{CipherData, CipherSyncData, CipherSyncType};
-use crate::util;
-use chrono::{NaiveDateTime, Utc};
+use crate::util::{self, LowerCase};
+use chrono::{DateTime, NaiveDateTime, Utc};
use diesel::result::{self, DatabaseErrorKind};
use serde_json::Value;
use std::borrow::Cow;
@@ -63,7 +63,7 @@ impl Cipher {
reprompt: None,
}
}
- pub fn validate_notes(cipher_data: &[CipherData]) -> EmptyResult {
+ pub fn validate_cipher_data(cipher_data: &[CipherData]) -> EmptyResult {
let mut validation_errors = serde_json::Map::new();
for (index, cipher) in cipher_data.iter().enumerate() {
if let Some(ref note) = cipher.notes {
@@ -77,6 +77,23 @@ impl Cipher {
);
}
}
+ // Check the password history for non-string (e.g. `null`) password values; if any are found, record a validation error
+ if let Some(Value::Array(ref password_history)) = cipher.password_history {
+ for pwh in password_history {
+ if let Value::Object(ref pwo) = *pwh {
+ if pwo.get("password").is_some_and(|p| !p.is_string()) {
+ validation_errors.insert(
+ format!("Ciphers[{index}].Notes"),
+ serde_json::to_value([
+ "The password history contains a `null` value. Only strings are allowed.",
+ ])
+ .unwrap(),
+ );
+ break;
+ }
+ }
+ }
+ }
}
if !validation_errors.is_empty() {
let err_json = json!({
@@ -101,16 +118,6 @@ impl Cipher {
sync_type: CipherSyncType,
conn: &DbConn,
) -> Value {
- let fields_json = self
- .fields
- .as_ref()
- .and_then(|s| serde_json::from_str(s).ok())
- .unwrap_or(Value::Null);
- let password_history_json = self
- .password_history
- .as_ref()
- .and_then(|s| serde_json::from_str(s).ok())
- .unwrap_or(Value::Null);
// We don't need these values at all for Organizational syncs
// Skip any other database calls if this is the case and just return false.
let (read_only, hide_passwords) = if sync_type == CipherSyncType::User {
@@ -124,27 +131,76 @@ impl Cipher {
} else {
(false, false)
};
+ let fields_json = self
+ .fields
+ .as_ref()
+ .and_then(|s| {
+ serde_json::from_str::<Vec<LowerCase<Value>>>(s)
+ .inspect_err(|e| warn!("Error parsing fields {e:?} for {}", self.uuid))
+ .ok()
+ })
+ .map_or(Value::Null, |d| d.into_iter().map(|da| da.data).collect());
+ let password_history_json = self
+ .password_history
+ .as_ref()
+ .and_then(|s| {
+ serde_json::from_str::<Vec<LowerCase<Value>>>(s)
+ .inspect_err(|e| {
+ warn!("Error parsing password history {e:?} for {}", self.uuid);
+ })
+ .ok()
+ })
+ .map_or(Value::Null, |d| {
+ // Check each password history item and keep only the valid ones.
+ // If a password field has type `null`, skip it; it breaks newer Bitwarden clients.
+ // A second check verifies that lastUsedDate exists and is a valid RFC 3339 string; if not, the epoch start time is used.
+ d.into_iter()
+ .filter_map(|da| match da.data.get("password") {
+ Some(p) if p.is_string() => Some(da.data),
+ _ => None,
+ })
+ .map(|da| match da.get("lastUsedDate").and_then(|l| l.as_str()) {
+ Some(l) if DateTime::parse_from_rfc3339(l).is_ok() => da,
+ _ => {
+ let mut dat = da;
+ dat["lastUsedDate"] = json!("1970-01-01T00:00:00.000Z");
+ dat
+ }
+ })
+ .collect()
+ });
// Get the type_data or default to an empty json object '{}'.
// If an empty object is not passed, mobile clients will crash.
- let mut type_data_json: Value = serde_json::from_str(&self.data)
- .unwrap_or_else(|_| Value::Object(serde_json::Map::new()));
+ let mut type_data_json = serde_json::from_str::<LowerCase<Value>>(&self.data).map_or_else(
+ |_e| {
+ warn!("Error parsing data field for {}", self.uuid);
+ Value::Object(serde_json::Map::new())
+ },
+ |d| d.data,
+ );
// NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream
// Set the first element of the Uris array as Uri; this is needed by several (mobile) clients.
if self.atype == 1i32 {
- if type_data_json["Uris"].is_array() {
- let uri = type_data_json["Uris"][0]["Uri"].clone();
- type_data_json["Uri"] = uri;
+ if type_data_json["uris"].is_array() {
+ let uri = type_data_json["uris"][0]["uri"].clone();
+ type_data_json["uri"] = uri;
} else {
// Upstream always has a Uri key/value
- type_data_json["Uri"] = Value::Null;
+ type_data_json["uri"] = Value::Null;
}
}
- if self.atype == 2i32
- && (self.data.is_empty()
- || self.data.eq("{}")
- || self.data.to_ascii_lowercase().eq("{\"type\":null}"))
- {
- type_data_json = json!({"type": 0i32});
+ if self.atype == 2i32 {
+ match type_data_json {
+ Value::Object(ref t) if t.get("type").is_some_and(Value::is_number) => {}
+ Value::Null
+ | Value::Bool(_)
+ | Value::Number(_)
+ | Value::String(_)
+ | Value::Array(_)
+ | Value::Object(_) => {
+ type_data_json = json!({"type": 0i32});
+ }
+ }
}
// Clone the type_data and add some default value.
let mut data_json = type_data_json.clone();
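
The `LowerCase<Value>` wrapper used above comes from src/util (not part of this diff); it appears to normalize legacy PascalCase keys in stored JSON (e.g. "Uris" → "uris") while deserializing. The history filtering itself can be sketched standalone, mirroring the logic above (the input shape is assumed):

use chrono::DateTime;
use serde_json::{json, Value};

// Drop history entries whose password is not a string (a `null` there breaks
// newer Bitwarden clients) and backfill a missing or unparsable lastUsedDate
// with the epoch start time.
fn sanitize_history(entries: Vec<Value>) -> Vec<Value> {
    entries
        .into_iter()
        .filter(|e| e.get("password").is_some_and(Value::is_string))
        .map(|mut e| {
            let valid_date = e
                .get("lastUsedDate")
                .and_then(Value::as_str)
                .is_some_and(|d| DateTime::parse_from_rfc3339(d).is_ok());
            if !valid_date {
                e["lastUsedDate"] = json!("1970-01-01T00:00:00.000Z");
            }
            e
        })
        .collect()
}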
diff --git a/src/db/models/collection.rs b/src/db/models/collection.rs
@@ -75,25 +75,61 @@ impl Collection {
cipher_sync_data: Option<&CipherSyncData>,
conn: &DbConn,
) -> Value {
- let (read_only, hide_passwords) = if let Some(cipher_sync_data) = cipher_sync_data {
+ let (read_only, hide_passwords, can_manage) = if let Some(cipher_sync_data) =
+ cipher_sync_data
+ {
match cipher_sync_data.user_organizations.get(&self.org_uuid) {
- Some(uo) if uo.has_full_access() => (false, false),
- Some(_) => cipher_sync_data
- .user_collections
- .get(&self.uuid)
- .map_or((false, false), |uc| (uc.read_only, uc.hide_passwords)),
- _ => (true, true),
+ // Bitwarden returns true for the can_manage option only for the Manager type.
+ // Owners and Admins always get false, but they can manage all collections anyway.
+ Some(uo) if uo.has_full_access() => {
+ (false, false, uo.atype == UserOrgType::Manager)
+ }
+ Some(uo) => {
+ // Only let a manager manage collections when they have full read/write access
+ let is_manager = uo.atype == UserOrgType::Manager;
+ cipher_sync_data.user_collections.get(&self.uuid).map_or(
+ (false, false, false),
+ |uc| {
+ (
+ uc.read_only,
+ uc.hide_passwords,
+ is_manager && !uc.read_only && !uc.hide_passwords,
+ )
+ },
+ )
+ }
+ _ => (true, true, false),
}
} else {
- (
- !self.is_writable_by_user(user_uuid, conn).await,
- self.hide_passwords_for_user(user_uuid, conn).await,
- )
+ match UserOrganization::find_confirmed_by_user_and_org(user_uuid, &self.org_uuid, conn)
+ .await
+ {
+ Some(ou) if ou.has_full_access() => {
+ (false, false, ou.atype == UserOrgType::Manager)
+ }
+ Some(ou) => {
+ let is_manager = ou.atype == UserOrgType::Manager;
+ let read_only = !self.is_writable_by_user(user_uuid, conn).await;
+ let hide_passwords = self.hide_passwords_for_user(user_uuid, conn).await;
+ (
+ read_only,
+ hide_passwords,
+ is_manager && !read_only && !hide_passwords,
+ )
+ }
+ _ => (
+ !self.is_writable_by_user(user_uuid, conn).await,
+ self.hide_passwords_for_user(user_uuid, conn).await,
+ false,
+ ),
+ }
};
+
let mut json_object = self.to_json();
json_object["object"] = json!("collectionDetails");
json_object["readOnly"] = json!(read_only);
json_object["hidePasswords"] = json!(hide_passwords);
+ json_object["manage"] = json!(can_manage);
json_object
}
@@ -494,7 +530,7 @@ impl CollectionUser {
Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await;
db_run! { conn: {
for user in collectionusers {
- diesel::delete(users_collections::table.filter(
+ let _: () = diesel::delete(users_collections::table.filter(
users_collections::user_uuid.eq(user_uuid)
.and(users_collections::collection_uuid.eq(user.collection_uuid))
))
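
The three-way tuples above all reduce to one rule for the new `manage` flag, stated here as a standalone predicate (names are descriptive, not from the codebase):

// `manage` is derived, never stored: only a Manager with unrestricted access
// (not read-only, passwords visible) may manage a collection. Owners and
// Admins report false here but can manage all collections anyway.
fn can_manage(is_manager: bool, read_only: bool, hide_passwords: bool) -> bool {
    is_manager && !read_only && !hide_passwords
}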
diff --git a/src/db/models/folder.rs b/src/db/models/folder.rs
@@ -39,10 +39,10 @@ impl Folder {
pub fn to_json(&self) -> Value {
use util::format_date;
json!({
- "Id": self.uuid,
- "RevisionDate": format_date(&self.updated_at),
- "Name": self.name,
- "Object": "folder",
+ "id": self.uuid,
+ "revisionDate": format_date(&self.updated_at),
+ "name": self.name,
+ "object": "folder",
})
}
}
diff --git a/src/db/models/org_policy.rs b/src/db/models/org_policy.rs
@@ -177,7 +177,7 @@ impl OrgPolicy {
}}
}
- async fn find_accepted_and_confirmed_by_user_and_active_policy(
+ pub async fn find_accepted_and_confirmed_by_user_and_active_policy(
user_uuid: &str,
policy_type: OrgPolicyType,
conn: &DbConn,
diff --git a/src/db/models/organization.rs b/src/db/models/organization.rs
@@ -1,10 +1,11 @@
use super::{CollectionUser, OrgPolicy, OrgPolicyType, User};
-use crate::db::models::TwoFactorType;
+use crate::db::models::{Collection, TwoFactorType};
use crate::error::Error;
use crate::util;
+use core::cmp::Ordering;
use diesel::result::{self, DatabaseErrorKind};
use serde_json::Value;
-use std::cmp::Ordering;
+use std::collections::HashMap;
db_object! {
#[derive(AsChangeset, Insertable, Queryable)]
@@ -429,21 +430,43 @@ impl UserOrganization {
.expect("unable to get two factor information");
let groups: Vec<String> = Vec::new();
let collections: Vec<Value> = if include_collections {
- CollectionUser::find_by_organization_and_user_uuid(
- &self.org_uuid,
- &self.user_uuid,
- conn,
- )
- .await
- .iter()
- .map(|cu| {
- json!({
- "id": cu.collection_uuid,
- "readOnly": cu.read_only,
- "hidePasswords": cu.hide_passwords,
+ // Get all the user's collections here up front to prevent more queries
+ let cu: HashMap<String, CollectionUser> =
+ CollectionUser::find_by_organization_and_user_uuid(
+ &self.org_uuid,
+ &self.user_uuid,
+ conn,
+ )
+ .await
+ .into_iter()
+ .map(|cu| (cu.collection_uuid.clone(), cu))
+ .collect();
+ Collection::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn)
+ .await
+ .into_iter()
+ .map(|c| {
+ let (read_only, hide_passwords, can_manage) = if self.has_full_access() {
+ (false, false, self.atype == UserOrgType::Manager)
+ } else if let Some(cu) = cu.get(&c.uuid) {
+ (
+ cu.read_only,
+ cu.hide_passwords,
+ self.atype == UserOrgType::Manager
+ && !cu.read_only
+ && !cu.hide_passwords,
+ )
+ } else {
+ (true, true, false)
+ };
+
+ json!({
+ "id": c.uuid,
+ "readOnly": read_only,
+ "hidePasswords": hide_passwords,
+ "manage": can_manage,
+ })
})
- })
- .collect()
+ .collect()
} else {
Vec::with_capacity(0)
};
@@ -454,6 +477,7 @@ impl UserOrganization {
"name": user.name,
"email": user.email,
"externalId": self.external_id,
+ "avatarColor": user.avatar_color,
"groups": groups,
"collections": collections,
"status": status,
@@ -755,14 +779,3 @@ impl UserOrganization {
}}
}
}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- #[test]
- fn partial_cmp_UserOrgType() {
- assert!(UserOrgType::Owner > UserOrgType::Admin);
- assert!(UserOrgType::Admin > UserOrgType::Manager);
- assert!(UserOrgType::Manager > UserOrgType::User);
- }
-}
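
The HashMap introduced above replaces a per-collection lookup with one bulk query followed by constant-time gets. The indexing step in isolation (struct fields simplified):

use std::collections::HashMap;

struct CollectionUser {
    collection_uuid: String,
    read_only: bool,
    hide_passwords: bool,
}

// Rows from a single bulk query become a map keyed by collection UUID, so
// the loop over the org's collections needs no further database calls.
fn index_by_collection(rows: Vec<CollectionUser>) -> HashMap<String, CollectionUser> {
    rows.into_iter()
        .map(|cu| (cu.collection_uuid.clone(), cu))
        .collect()
}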
diff --git a/src/db/models/two_factor.rs b/src/db/models/two_factor.rs
@@ -151,9 +151,9 @@ impl Serialize for TwoFactorType {
S: Serializer,
{
let mut s = serializer.serialize_struct("TwoFactorType", 3)?;
- s.serialize_field("Enabled", &true)?;
- s.serialize_field("Type", &i32::from(*self))?;
- s.serialize_field("Object", "twoFactorProvider")?;
+ s.serialize_field("enabled", &true)?;
+ s.serialize_field("type", &i32::from(*self))?;
+ s.serialize_field("object", "twoFactorProvider")?;
s.end()
}
}
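
The rename only changes the serialized keys; the shape is unchanged. A self-contained equivalent of this impl (the provider number 7 is WebAuthn in Bitwarden's numbering, used here purely as an example):

use serde::ser::{Serialize, SerializeStruct, Serializer};

struct Provider(i32);

impl Serialize for Provider {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Three fixed fields, now camelCase, matching the hunk above.
        let mut s = serializer.serialize_struct("TwoFactorType", 3)?;
        s.serialize_field("enabled", &true)?;
        s.serialize_field("type", &self.0)?;
        s.serialize_field("object", "twoFactorProvider")?;
        s.end()
    }
}

fn main() {
    // Prints {"enabled":true,"type":7,"object":"twoFactorProvider"}.
    println!("{}", serde_json::to_string(&Provider(7)).unwrap());
}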
diff --git a/src/error.rs b/src/error.rs
@@ -1,9 +1,10 @@
//
// Error generator macro
//
+use core::error::Error as StdError;
use core::fmt::{self, Debug, Display, Formatter};
-use std::error::Error as StdError;
+#[expect(edition_2024_expr_fragment_specifier, reason = "false positive")]
macro_rules! make_error {
( $( $name:ident ( $ty:ty ): $src_fn:expr, $usr_msg_fun:expr ),+ $(,)? ) => {
const BAD_REQUEST: u16 = 400;
@@ -239,6 +240,7 @@ impl<'r> Responder<'r, 'static> for Error {
//
// Error return macros
//
+#[expect(edition_2024_expr_fragment_specifier, reason = "false positive")]
#[macro_export]
macro_rules! err {
($msg:expr) => {{
@@ -255,6 +257,7 @@ macro_rules! err {
}};
}
+#[expect(edition_2024_expr_fragment_specifier, reason = "false positive")]
#[macro_export]
macro_rules! err_silent {
($msg:expr) => {{
@@ -265,6 +268,7 @@ macro_rules! err_silent {
}};
}
+#[expect(edition_2024_expr_fragment_specifier, reason = "false positive")]
#[macro_export]
macro_rules! err_code {
($msg:expr, $err_code:expr) => {{
@@ -275,6 +279,7 @@ macro_rules! err_code {
}};
}
+#[expect(edition_2024_expr_fragment_specifier, reason = "false positive")]
#[macro_export]
macro_rules! err_discard {
($msg:expr, $data:expr) => {{
@@ -287,6 +292,7 @@ macro_rules! err_discard {
}};
}
+#[expect(edition_2024_expr_fragment_specifier, reason = "false positive")]
#[macro_export]
macro_rules! err_json {
($expr:expr, $log_value:expr) => {{
@@ -297,6 +303,7 @@ macro_rules! err_json {
}};
}
+#[expect(edition_2024_expr_fragment_specifier, reason = "false positive")]
#[macro_export]
macro_rules! err_handler {
($expr:expr) => {{
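
On the repeated #[expect(edition_2024_expr_fragment_specifier, reason = "false positive")]: this is the Rust 2024 migration lint that fires on `$x:expr` fragments because `expr` matches additional syntax (such as `const { ... }` blocks) in the new edition; the suggested migration is `expr_2021`, which pins the old matching. A toy macro showing the shape of what the lint flags (not code from this repository):

// During edition-2024 migration the `$e:expr` fragment below is flagged;
// rewriting it as `$e:expr_2021` would preserve pre-2024 matching behavior,
// while `#[expect]` keeps `expr` and acknowledges the lint instead.
macro_rules! double {
    ($e:expr) => {
        $e * 2
    };
}

fn main() {
    assert_eq!(double!(21), 42);
}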