commit 2a8a460557bc7fae2df9cc7b7f7831fdee3e6b87
parent c27faa018ad4353e5a57e97d0b21967a9095f0e7
Author: Zack Newman <zack@philomathiclife.com>
Date: Fri, 1 Dec 2023 16:32:01 -0700
more cleanup
Diffstat:
54 files changed, 1707 insertions(+), 6788 deletions(-)
diff --git a/Cargo.toml b/Cargo.toml
@@ -1,5 +1,5 @@
[package]
-authors = ["Zack Newman <zack@philomathiclife.com>"]
+authors = ["Daniel GarcĂa <dani-garcia@users.noreply.github.com>", "Zack Newman <zack@philomathiclife.com>"]
categories = ["api-bindings", "web-programming::http-server"]
description = "Fork of Vaultwarden with fewer features and pledge(2) and unveil(2) support."
documentation = "https://github.com/dani-garcia/vaultwarden/wiki"
diff --git a/build.rs b/build.rs
@@ -1,3 +0,0 @@
-fn main() {
- println!("cargo:rustc-cfg=sqlite");
-}
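The deleted build script existed only to emit a compiler cfg flag. A minimal sketch of the pattern it enabled, assuming a `#[cfg(sqlite)]`-gated item somewhere in the crate (the `backend` function below is illustrative, not from this codebase):

    // build.rs: ran before compilation and emitted the flag, so that
    // `#[cfg(sqlite)]` items were compiled in.
    fn main() {
        println!("cargo:rustc-cfg=sqlite");
    }

    // Elsewhere in the crate: only compiled when the flag above is emitted.
    #[cfg(sqlite)]
    fn backend() -> &'static str {
        "sqlite"
    }

With the script gone the cfg is never set, so any remaining `#[cfg(sqlite)]` gates would silently compile out; presumably none remain after this commit.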
diff --git a/config.toml b/config.toml
@@ -3,10 +3,8 @@ database_max_conns=4
db_connection_retries=8
domain="pmd.philomathiclife.com"
ip="fdb5:d87:ae42:1::1"
-org_attachment_limit=0
#password_iterations=600000
port=8443
-user_attachment_limit=0
#web_vault_enabled=true
workers=4
[tls]
diff --git a/src/api/admin.rs b/src/api/admin.rs
@@ -2,10 +2,10 @@ use rocket::{Catcher, Route};
pub fn routes() -> Vec<Route> {
routes![admin_disabled]
}
-pub fn catchers() -> Vec<Catcher> {
- catchers![]
+pub const fn catchers() -> Vec<Catcher> {
+ Vec::new()
}
#[get("/")]
-fn admin_disabled() -> &'static str {
- "The admin panel is not allowed to be enabled."
+const fn admin_disabled() -> &'static str {
+ "The admin panel is permanently disabled."
}
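Swapping the `catchers![]` macro for an explicit `Vec::new()` is what makes the `const` qualifier possible: `Vec::new` is callable in const context, while the macro expansion is not necessarily usable there. A standalone illustration (types simplified):

    // An empty Vec can be built in const context, so the function itself can
    // be a const fn and even initialize a const item.
    const fn empty_catchers() -> Vec<String> {
        Vec::new()
    }

    const NONE: Vec<String> = empty_catchers();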
diff --git a/src/api/core/accounts.rs b/src/api/core/accounts.rs
@@ -1,21 +1,22 @@
-use chrono::Utc;
-use rocket::serde::json::Json;
-use serde_json::Value;
-
use crate::{
api::{
AnonymousNotify, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString,
PasswordOrOtpData, UpdateType,
},
- auth::{decode_delete, decode_invite, decode_verify_email, ClientHeaders, Headers},
+ auth::{decode_delete, decode_verify_email, ClientHeaders, Headers},
config, crypto,
- db::{models::*, DbConn},
+ db::{
+ models::{AuthRequest, Cipher, Device, DeviceType, Folder, User, UserKdfType},
+ DbConn,
+ },
};
-
+use chrono::Utc;
+use rocket::serde::json::Json;
use rocket::{
http::Status,
request::{FromRequest, Outcome, Request},
};
+use serde_json::Value;
pub fn routes() -> Vec<rocket::Route> {
routes![
@@ -59,7 +60,7 @@ pub fn routes() -> Vec<rocket::Route> {
}
#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[allow(dead_code, non_snake_case)]
pub struct RegisterData {
Email: String,
Kdf: Option<i32>,
@@ -85,13 +86,10 @@ struct KeysData {
/// Trims whitespace from password hints, and converts blank password hints to `None`.
fn clean_password_hint(password_hint: &Option<String>) -> Option<String> {
- match password_hint {
- None => None,
- Some(h) => match h.trim() {
- "" => None,
- ht => Some(ht.to_string()),
- },
- }
+ password_hint.as_ref().and_then(|h| match h.trim() {
+ "" => None,
+ ht => Some(ht.to_string()),
+ })
}
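The refactor collapses the nested `match` into an `Option::and_then` chain; both forms agree on all three kinds of input. A quick standalone check of the new form:

    fn clean(hint: &Option<String>) -> Option<String> {
        hint.as_ref().and_then(|h| match h.trim() {
            "" => None,
            ht => Some(ht.to_string()),
        })
    }

    fn main() {
        assert_eq!(clean(&None), None);               // absent stays absent
        assert_eq!(clean(&Some("   ".into())), None); // blank becomes None
        assert_eq!(clean(&Some(" x ".into())), Some("x".into())); // trimmed
    }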
fn enforce_password_hint_setting(password_hint: &Option<String>) -> EmptyResult {
@@ -101,102 +99,14 @@ fn enforce_password_hint_setting(password_hint: &Option<String>) -> EmptyResult
Ok(())
}
+#[allow(unused_variables)]
#[post("/accounts/register", data = "<data>")]
-async fn register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
- _register(data, conn).await
+fn register(data: JsonUpcase<RegisterData>, _conn: DbConn) -> JsonResult {
+ err!("Registration is permanently disabled.")
}
-
-pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> JsonResult {
- let data: RegisterData = data.into_inner().data;
- let email = data.Email.to_lowercase();
-
- // Check if the length of the username exceeds 50 characters (same as upstream Bitwarden)
- // This also prevents issues with very long usernames causing too large JWTs. See #2419
- if let Some(ref name) = data.Name {
- if name.len() > 50 {
- err!("The field Name must be a string with a maximum length of 50.");
- }
- }
-
- // Check against the password hint setting here so if it fails, the user
- // can retry without losing their invitation below.
- let password_hint = clean_password_hint(&data.MasterPasswordHint);
- enforce_password_hint_setting(&password_hint)?;
- let mut user = match User::find_by_mail(&email, &mut conn).await {
- Some(mut user) => {
- if !user.password_hash.is_empty() {
- err!("Registration not allowed or user already exists")
- }
-
- if let Some(token) = data.Token {
- let claims = decode_invite(&token)?;
- if claims.email == email {
- user.verified_at = Some(Utc::now().naive_utc());
- user
- } else {
- err!("Registration email does not match invite email")
- }
- } else if Invitation::take(&email, &mut conn).await {
- for user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn)
- .await
- .iter_mut()
- {
- user_org.status = UserOrgStatus::Accepted as i32;
- user_org.save(&mut conn).await?;
- }
- user
- } else {
- err!("Registration not allowed or user already exists")
- }
- }
- None => {
- // Order is important here; the invitation check must come first
- // because the vaultwarden admin can invite anyone, regardless
- // of other signup restrictions.
- if Invitation::take(&email, &mut conn).await {
- User::new(email.clone())
- } else {
- err!("Registration not allowed or user already exists")
- }
- }
- };
-
- // Make sure we don't leave a lingering invitation.
- Invitation::take(&email, &mut conn).await;
-
- if let Some(client_kdf_type) = data.Kdf {
- user.client_kdf_type = client_kdf_type;
- }
-
- if let Some(client_kdf_iter) = data.KdfIterations {
- user.client_kdf_iter = client_kdf_iter;
- }
-
- user.client_kdf_memory = data.KdfMemory;
- user.client_kdf_parallelism = data.KdfParallelism;
-
- user.set_password(&data.MasterPasswordHash, Some(data.Key), true, None);
- user.password_hint = password_hint;
-
- // Add extra fields if present
- if let Some(name) = data.Name {
- user.name = name;
- }
-
- if let Some(keys) = data.Keys {
- user.private_key = Some(keys.EncryptedPrivateKey);
- user.public_key = Some(keys.PublicKey);
- }
- user.save(&mut conn).await?;
- Ok(Json(json!({
- "Object": "register",
- "CaptchaBypassToken": "",
- })))
-}
-
#[get("/accounts/profile")]
-async fn profile(headers: Headers, mut conn: DbConn) -> Json<Value> {
- Json(headers.user.to_json(&mut conn).await)
+async fn profile(headers: Headers, conn: DbConn) -> Json<Value> {
+ Json(headers.user.to_json(&conn).await)
}
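Almost every handler in this commit drops `mut` from `DbConn` and passes `&conn` rather than `&mut conn`; helper signatures change from `conn: &mut DbConn` to `conn: &DbConn` to match. That only type-checks if the connection type serializes access internally. A hedged sketch of one shape such a wrapper can take (the `Conn` type and its `Mutex` layout are assumptions for illustration; the actual `DbConn` change is not shown in this diff):

    use std::sync::Mutex;

    // Hypothetical wrapper: callers share `&Conn`, and each operation takes
    // the lock internally, so no `&mut` is needed at call sites.
    struct Conn {
        inner: Mutex<Vec<String>>, // stand-in for a real database handle
    }

    impl Conn {
        fn run(&self, stmt: &str) {
            self.inner.lock().unwrap().push(stmt.to_owned());
        }
    }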
#[derive(Deserialize, Debug)]
@@ -213,11 +123,7 @@ async fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbCo
}
#[post("/accounts/profile", data = "<data>")]
-async fn post_profile(
- data: JsonUpcase<ProfileData>,
- headers: Headers,
- mut conn: DbConn,
-) -> JsonResult {
+async fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: ProfileData = data.into_inner().data;
// Check if the length of the username exceeds 50 characters (same as upstream Bitwarden)
@@ -229,8 +135,8 @@ async fn post_profile(
let mut user = headers.user;
user.name = data.Name;
- user.save(&mut conn).await?;
- Ok(Json(user.to_json(&mut conn).await))
+ user.save(&conn).await?;
+ Ok(Json(user.to_json(&conn).await))
}
#[derive(Deserialize)]
@@ -240,11 +146,7 @@ struct AvatarData {
}
#[put("/accounts/avatar", data = "<data>")]
-async fn put_avatar(
- data: JsonUpcase<AvatarData>,
- headers: Headers,
- mut conn: DbConn,
-) -> JsonResult {
+async fn put_avatar(data: JsonUpcase<AvatarData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: AvatarData = data.into_inner().data;
// It looks like it only supports the six-digit hex color format.
@@ -261,17 +163,15 @@ async fn put_avatar(
let mut user = headers.user;
user.avatar_color = data.AvatarColor;
- user.save(&mut conn).await?;
- Ok(Json(user.to_json(&mut conn).await))
+ user.save(&conn).await?;
+ Ok(Json(user.to_json(&conn).await))
}
#[get("/users/<uuid>/public-key")]
-async fn get_public_keys(uuid: &str, _headers: Headers, mut conn: DbConn) -> JsonResult {
- let user = match User::find_by_uuid(uuid, &mut conn).await {
- Some(user) => user,
- None => err!("User doesn't exist"),
+async fn get_public_keys(uuid: &str, _headers: Headers, conn: DbConn) -> JsonResult {
+ let Some(user) = User::find_by_uuid(uuid, &conn).await else {
+ err!("User doesn't exist")
};
-
Ok(Json(json!({
"UserId": user.uuid,
"PublicKey": user.public_key,
@@ -280,16 +180,12 @@ async fn get_public_keys(uuid: &str, _headers: Headers, mut conn: DbConn) -> Jso
}
#[post("/accounts/keys", data = "<data>")]
-async fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn post_keys(data: JsonUpcase<KeysData>, headers: Headers, conn: DbConn) -> JsonResult {
let data: KeysData = data.into_inner().data;
-
let mut user = headers.user;
-
user.private_key = Some(data.EncryptedPrivateKey);
user.public_key = Some(data.PublicKey);
-
- user.save(&mut conn).await?;
-
+ user.save(&conn).await?;
Ok(Json(json!({
"PrivateKey": user.private_key,
"PublicKey": user.public_key,
@@ -310,7 +206,7 @@ struct ChangePassData {
async fn post_password(
data: JsonUpcase<ChangePassData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
let data: ChangePassData = data.into_inner().data;
@@ -332,7 +228,7 @@ async fn post_password(
]),
);
- let save_result = user.save(&mut conn).await;
+ let save_result = user.save(&conn).await;
// Prevent logging out the client from which the user requested this endpoint.
// If you do log out the user, it will cause issues on the client side.
@@ -359,7 +255,7 @@ struct ChangeKdfData {
async fn post_kdf(
data: JsonUpcase<ChangeKdfData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
let data: ChangeKdfData = data.into_inner().data;
@@ -400,7 +296,7 @@ async fn post_kdf(
user.client_kdf_iter = data.KdfIterations;
user.client_kdf_type = data.Kdf;
user.set_password(&data.NewMasterPasswordHash, Some(data.Key), true, None);
- let save_result = user.save(&mut conn).await;
+ let save_result = user.save(&conn).await;
nt.send_logout(&user, Some(headers.device.uuid)).await;
@@ -430,7 +326,7 @@ struct KeyData {
async fn post_rotatekey(
data: JsonUpcase<KeyData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
let data: KeyData = data.into_inner().data;
@@ -449,9 +345,8 @@ async fn post_rotatekey(
// Update folder data
for folder_data in data.Folders {
- let mut saved_folder = match Folder::find_by_uuid(&folder_data.Id, &mut conn).await {
- Some(folder) => folder,
- None => err!("Folder doesn't exist"),
+ let Some(mut saved_folder) = Folder::find_by_uuid(&folder_data.Id, &conn).await else {
+ err!("Folder doesn't exist")
};
if &saved_folder.user_uuid != user_uuid {
@@ -459,18 +354,18 @@ async fn post_rotatekey(
}
saved_folder.name = folder_data.Name;
- saved_folder.save(&mut conn).await?
+ saved_folder.save(&conn).await?;
}
// Update cipher data
use super::ciphers::update_cipher_from_data;
for cipher_data in data.Ciphers {
- let mut saved_cipher =
- match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &mut conn).await {
- Some(cipher) => cipher,
- None => err!("Cipher doesn't exist"),
- };
+ let Some(mut saved_cipher) =
+ Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &conn).await
+ else {
+ err!("Cipher doesn't exist")
+ };
if saved_cipher.user_uuid.as_ref().unwrap() != user_uuid {
err!("The cipher is not owned by the user")
@@ -484,11 +379,11 @@ async fn post_rotatekey(
cipher_data,
&headers,
false,
- &mut conn,
+ &conn,
&nt,
UpdateType::None,
)
- .await?
+ .await?;
}
// Update user data
@@ -498,7 +393,7 @@ async fn post_rotatekey(
user.private_key = Some(data.PrivateKey);
user.reset_security_stamp();
- let save_result = user.save(&mut conn).await;
+ let save_result = user.save(&conn).await;
// Prevent logging out the client from which the user requested this endpoint.
// If you do log out the user, it will cause issues on the client side.
@@ -512,17 +407,17 @@ async fn post_rotatekey(
async fn post_sstamp(
data: JsonUpcase<PasswordOrOtpData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
let data: PasswordOrOtpData = data.into_inner().data;
let mut user = headers.user;
- data.validate(&user, true, &mut conn).await?;
+ data.validate(&user, true, &conn).await?;
- Device::delete_all_by_user(&user.uuid, &mut conn).await?;
+ Device::delete_all_by_user(&user.uuid, &conn).await?;
user.reset_security_stamp();
- let save_result = user.save(&mut conn).await;
+ let save_result = user.save(&conn).await;
nt.send_logout(&user, None).await;
@@ -582,18 +477,16 @@ struct VerifyEmailTokenData {
#[post("/accounts/verify-email-token", data = "<data>")]
async fn post_verify_email_token(
data: JsonUpcase<VerifyEmailTokenData>,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
let data: VerifyEmailTokenData = data.into_inner().data;
- let mut user = match User::find_by_uuid(&data.UserId, &mut conn).await {
- Some(user) => user,
- None => err!("User doesn't exist"),
+ let Some(mut user) = User::find_by_uuid(&data.UserId, &conn).await else {
+ err!("User doesn't exist")
};
- let claims = match decode_verify_email(&data.Token) {
- Ok(claims) => claims,
- Err(_) => err!("Invalid claim"),
+ let Ok(claims) = decode_verify_email(&data.Token) else {
+ err!("Invalid claim")
};
if claims.sub != user.uuid {
err!("Invalid claim");
@@ -601,7 +494,7 @@ async fn post_verify_email_token(
user.verified_at = Some(Utc::now().naive_utc());
user.last_verifying_at = None;
user.login_verify_count = 0;
- user.save(&mut conn).await
+ user.save(&conn).await
}
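The `match`-to-`let ... else` conversions above are behavior-preserving: `let`-`else` binds the happy path and forces the `else` arm to diverge (return, `continue`, or an early-return macro like `err!`). It works for any refutable pattern, `Option` and `Result` alike; a standalone example:

    fn first_port(input: &str) -> u16 {
        let Some(field) = input.split(':').nth(1) else {
            return 0; // no ':' separator present
        };
        let Ok(port) = field.trim().parse::<u16>() else {
            return 0; // separator present but not a valid port
        };
        port
    }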
#[derive(Deserialize)]
@@ -626,23 +519,21 @@ struct DeleteRecoverTokenData {
#[post("/accounts/delete-recover-token", data = "<data>")]
async fn post_delete_recover_token(
data: JsonUpcase<DeleteRecoverTokenData>,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
let data: DeleteRecoverTokenData = data.into_inner().data;
- let user = match User::find_by_uuid(&data.UserId, &mut conn).await {
- Some(user) => user,
- None => err!("User doesn't exist"),
+ let Some(user) = User::find_by_uuid(&data.UserId, &conn).await else {
+ err!("User doesn't exist")
};
- let claims = match decode_delete(&data.Token) {
- Ok(claims) => claims,
- Err(_) => err!("Invalid claim"),
+ let Ok(claims) = decode_delete(&data.Token) else {
+ err!("Invalid claim")
};
if claims.sub != user.uuid {
err!("Invalid claim");
}
- user.delete(&mut conn).await
+ user.delete(&conn).await
}
#[post("/accounts/delete", data = "<data>")]
@@ -658,14 +549,14 @@ async fn post_delete_account(
async fn delete_account(
data: JsonUpcase<PasswordOrOtpData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
- data.validate(&user, true, &mut conn).await?;
- user.delete(&mut conn).await
+ data.validate(&user, true, &conn).await?;
+ user.delete(&conn).await
}
-
+#[allow(clippy::unnecessary_wraps)]
#[get("/accounts/revision-date")]
fn revision_date(headers: Headers) -> JsonResult {
let revision_date = headers.user.updated_at.timestamp_millis();
@@ -695,24 +586,24 @@ async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
_prelogin(data, conn).await
}
-pub async fn _prelogin(data: JsonUpcase<PreloginData>, mut conn: DbConn) -> Json<Value> {
+pub async fn _prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
let data: PreloginData = data.into_inner().data;
- let (kdf_type, kdf_iter, kdf_mem, kdf_para) =
- match User::find_by_mail(&data.Email, &mut conn).await {
- Some(user) => (
- user.client_kdf_type,
- user.client_kdf_iter,
- user.client_kdf_memory,
- user.client_kdf_parallelism,
- ),
- None => (
- User::CLIENT_KDF_TYPE_DEFAULT,
- User::CLIENT_KDF_ITER_DEFAULT,
- None,
- None,
- ),
- };
+ let (kdf_type, kdf_iter, kdf_mem, kdf_para) = match User::find_by_mail(&data.Email, &conn).await
+ {
+ Some(user) => (
+ user.client_kdf_type,
+ user.client_kdf_iter,
+ user.client_kdf_memory,
+ user.client_kdf_parallelism,
+ ),
+ None => (
+ User::CLIENT_KDF_TYPE_DEFAULT,
+ User::CLIENT_KDF_ITER_DEFAULT,
+ None,
+ None,
+ ),
+ };
let result = json!({
"Kdf": kdf_type,
@@ -747,18 +638,18 @@ async fn _api_key(
data: JsonUpcase<PasswordOrOtpData>,
rotate: bool,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
use crate::util::format_date;
let data: PasswordOrOtpData = data.into_inner().data;
let mut user = headers.user;
- data.validate(&user, true, &mut conn).await?;
+ data.validate(&user, true, &conn).await?;
if rotate || user.api_key.is_none() {
user.api_key = Some(crypto::generate_api_key());
- user.save(&mut conn).await.expect("Error saving API key");
+ user.save(&conn).await.expect("Error saving API key");
}
Ok(Json(json!({
@@ -788,11 +679,11 @@ async fn rotate_api_key(
// This variant is deprecated: https://github.com/bitwarden/server/pull/2682
#[get("/devices/knowndevice/<email>/<uuid>")]
-async fn get_known_device_from_path(email: &str, uuid: &str, mut conn: DbConn) -> JsonResult {
+async fn get_known_device_from_path(email: &str, uuid: &str, conn: DbConn) -> JsonResult {
// This endpoint doesn't have an auth header
let mut result = false;
- if let Some(user) = User::find_by_mail(email, &mut conn).await {
- result = Device::find_by_uuid_and_user(uuid, &user.uuid, &mut conn)
+ if let Some(user) = User::find_by_mail(email, &conn).await {
+ result = Device::find_by_uuid_and_user(uuid, &user.uuid, &conn)
.await
.is_some();
}
@@ -815,14 +706,12 @@ impl<'r> FromRequest<'r> for KnownDevice {
async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let email = if let Some(email_b64) = req.headers().get_one("X-Request-Email") {
- let email_bytes = match data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes()) {
- Ok(bytes) => bytes,
- Err(_) => {
- return Outcome::Error((
- Status::BadRequest,
- "X-Request-Email value failed to decode as base64url",
- ));
- }
+ let Ok(email_bytes) = data_encoding::BASE64URL_NOPAD.decode(email_b64.as_bytes())
+ else {
+ return Outcome::Error((
+ Status::BadRequest,
+ "X-Request-Email value failed to decode as base64url",
+ ));
};
match String::from_utf8(email_bytes) {
Ok(email) => email,
@@ -842,8 +731,7 @@ impl<'r> FromRequest<'r> for KnownDevice {
} else {
return Outcome::Error((Status::BadRequest, "X-Device-Identifier value is required"));
};
-
- Outcome::Success(KnownDevice { email, uuid })
+ Outcome::Success(Self { email, uuid })
}
}
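The request guard above expects `X-Request-Email` to be base64url-encoded without padding. A round trip with the same `data_encoding` codec the guard uses:

    use data_encoding::BASE64URL_NOPAD;

    fn main() {
        let encoded = BASE64URL_NOPAD.encode(b"user@example.com");
        let decoded = BASE64URL_NOPAD.decode(encoded.as_bytes()).unwrap();
        assert_eq!(decoded, b"user@example.com".to_vec());
        assert_eq!(String::from_utf8(decoded).unwrap(), "user@example.com");
    }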
@@ -860,7 +748,7 @@ fn post_device_token(
) -> EmptyResult {
put_device_token(uuid, data, headers, conn)
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::unnecessary_wraps)]
#[put("/devices/identifier/<uuid>/token", data = "<data>")]
fn put_device_token(
uuid: &str,
@@ -870,13 +758,14 @@ fn put_device_token(
) -> EmptyResult {
Ok(())
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::unnecessary_wraps)]
#[put("/devices/identifier/<uuid>/clear-token")]
fn put_clear_device_token(uuid: &str, _conn: DbConn) -> EmptyResult {
Ok(())
}
// On the upstream server, both PUT and POST are declared. Implement the POST method in case it is useful somewhere.
+#[allow(clippy::unnecessary_wraps)]
#[post("/devices/identifier/<uuid>/clear-token")]
fn post_clear_device_token(uuid: &str, conn: DbConn) -> EmptyResult {
put_clear_device_token(uuid, conn)
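The new `clippy::unnecessary_wraps` allows in this file exist because these token endpoints always succeed, yet their route signatures still return `EmptyResult`; clippy would otherwise flag a function that can only return `Ok`. A minimal reproduction of the lint situation:

    // Without the allow, clippy suggests dropping the Result entirely, but
    // the signature is fixed by the routing layer that calls this.
    #[allow(clippy::unnecessary_wraps)]
    fn clear_token() -> Result<(), String> {
        Ok(())
    }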
@@ -897,18 +786,13 @@ struct AuthRequestRequest {
async fn post_auth_request(
data: Json<AuthRequestRequest>,
headers: ClientHeaders,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let data = data.into_inner();
-
- let user = match User::find_by_mail(&data.email, &mut conn).await {
- Some(user) => user,
- None => {
- err!("AuthRequest doesn't exist")
- }
+ let Some(user) = User::find_by_mail(&data.email, &conn).await else {
+ err!("AuthRequest doesn't exist")
};
-
let mut auth_request = AuthRequest::new(
user.uuid.clone(),
data.deviceIdentifier.clone(),
@@ -917,16 +801,14 @@ async fn post_auth_request(
data.accessCode,
data.publicKey,
);
- auth_request.save(&mut conn).await?;
-
+ auth_request.save(&conn).await?;
nt.send_auth_request(
&user.uuid,
&auth_request.uuid,
&data.deviceIdentifier,
- &mut conn,
+ &conn,
)
.await;
-
Ok(Json(json!({
"id": auth_request.uuid,
"publicKey": auth_request.public_key,
@@ -943,12 +825,9 @@ async fn post_auth_request(
}
#[get("/auth-requests/<uuid>")]
-async fn get_auth_request(uuid: &str, mut conn: DbConn) -> JsonResult {
- let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
- Some(auth_request) => auth_request,
- None => {
- err!("AuthRequest doesn't exist")
- }
+async fn get_auth_request(uuid: &str, conn: DbConn) -> JsonResult {
+ let Some(auth_request) = AuthRequest::find_by_uuid(uuid, &conn).await else {
+ err!("AuthRequest doesn't exist")
};
let response_date_utc = auth_request
@@ -985,24 +864,22 @@ struct AuthResponseRequest {
async fn put_auth_request(
uuid: &str,
data: Json<AuthResponseRequest>,
- mut conn: DbConn,
+ conn: DbConn,
ant: AnonymousNotify<'_>,
nt: Notify<'_>,
) -> JsonResult {
let data = data.into_inner();
- let mut auth_request: AuthRequest = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
+ let mut auth_request: AuthRequest = match AuthRequest::find_by_uuid(uuid, &conn).await {
Some(auth_request) => auth_request,
None => {
err!("AuthRequest doesn't exist")
}
};
-
auth_request.approved = Some(data.requestApproved);
auth_request.enc_key = Some(data.key);
auth_request.master_password_hash = data.masterPasswordHash;
auth_request.response_device_id = Some(data.deviceIdentifier.clone());
- auth_request.save(&mut conn).await?;
-
+ auth_request.save(&conn).await?;
if auth_request.approved.unwrap_or(false) {
ant.send_auth_response(&auth_request.user_uuid, &auth_request.uuid)
.await;
@@ -1010,15 +887,13 @@ async fn put_auth_request(
&auth_request.user_uuid,
&auth_request.uuid,
data.deviceIdentifier,
- &mut conn,
+ &conn,
)
.await;
}
-
let response_date_utc = auth_request
.response_date
.map(|response_date| response_date.and_utc());
-
Ok(Json(json!(
{
"id": uuid,
@@ -1037,12 +912,9 @@ async fn put_auth_request(
}
#[get("/auth-requests/<uuid>/response?<code>")]
-async fn get_auth_request_response(uuid: &str, code: &str, mut conn: DbConn) -> JsonResult {
- let auth_request = match AuthRequest::find_by_uuid(uuid, &mut conn).await {
- Some(auth_request) => auth_request,
- None => {
- err!("AuthRequest doesn't exist")
- }
+async fn get_auth_request_response(uuid: &str, code: &str, conn: DbConn) -> JsonResult {
+ let Some(auth_request) = AuthRequest::find_by_uuid(uuid, &conn).await else {
+ err!("AuthRequest doesn't exist")
};
if !auth_request.check_access_code(code) {
@@ -1071,16 +943,14 @@ async fn get_auth_request_response(uuid: &str, code: &str, mut conn: DbConn) ->
}
#[get("/auth-requests")]
-async fn get_auth_requests(headers: Headers, mut conn: DbConn) -> JsonResult {
- let auth_requests = AuthRequest::find_by_user(&headers.user.uuid, &mut conn).await;
-
+async fn get_auth_requests(headers: Headers, conn: DbConn) -> JsonResult {
+ let auth_requests = AuthRequest::find_by_user(&headers.user.uuid, &conn).await;
Ok(Json(json!({
"data": auth_requests
.iter()
.filter(|request| request.approved.is_none())
.map(|request| {
let response_date_utc = request.response_date.map(|response_date| response_date.and_utc());
-
json!({
"id": request.uuid,
"publicKey": request.public_key,
diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs
@@ -2,9 +2,13 @@ use super::folders::FolderData;
use crate::{
api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordOrOtpData, UpdateType},
auth::Headers,
- config::{self, Config},
- crypto,
- db::{models::*, DbConn},
+ db::{
+ models::{
+ Cipher, Collection, CollectionCipher, CollectionUser, Favorite, Folder, FolderCipher,
+ OrgPolicy, OrgPolicyType, UserOrgType, UserOrganization,
+ },
+ DbConn,
+ },
};
use chrono::{NaiveDateTime, Utc};
use rocket::fs::TempFile;
@@ -87,58 +91,50 @@ struct SyncData {
}
#[get("/sync?<data..>")]
-async fn sync(data: SyncData, headers: Headers, mut conn: DbConn) -> Json<Value> {
- let user_json = headers.user.to_json(&mut conn).await;
+async fn sync(data: SyncData, headers: Headers, conn: DbConn) -> Json<Value> {
+ let user_json = headers.user.to_json(&conn).await;
// Get all ciphers which are visible by the user
- let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &mut conn).await;
+ let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn).await;
let cipher_sync_data =
- CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await;
+ CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &conn).await;
// Let's generate the ciphers_json using all the gathered info.
let mut ciphers_json = Vec::with_capacity(ciphers.len());
for c in ciphers {
ciphers_json.push(
c.to_json(
- &headers.host,
&headers.user.uuid,
Some(&cipher_sync_data),
CipherSyncType::User,
- &mut conn,
+ &conn,
)
.await,
);
}
- let collections = Collection::find_by_user_uuid(headers.user.uuid.clone(), &mut conn).await;
+ let collections = Collection::find_by_user_uuid(headers.user.uuid.clone(), &conn).await;
let mut collections_json = Vec::with_capacity(collections.len());
for c in collections {
collections_json.push(
- c.to_json_details(&headers.user.uuid, Some(&cipher_sync_data), &mut conn)
+ c.to_json_details(&headers.user.uuid, Some(&cipher_sync_data), &conn)
.await,
);
}
- let folders_json: Vec<Value> = Folder::find_by_user(&headers.user.uuid, &mut conn)
+ let folders_json: Vec<Value> = Folder::find_by_user(&headers.user.uuid, &conn)
.await
.iter()
.map(Folder::to_json)
.collect();
- let sends_json: Vec<Value> = Send::find_by_user(&headers.user.uuid, &mut conn)
+ let policies_json: Vec<Value> = OrgPolicy::find_confirmed_by_user(&headers.user.uuid, &conn)
.await
.iter()
- .map(Send::to_json)
+ .map(OrgPolicy::to_json)
.collect();
- let policies_json: Vec<Value> =
- OrgPolicy::find_confirmed_by_user(&headers.user.uuid, &mut conn)
- .await
- .iter()
- .map(OrgPolicy::to_json)
- .collect();
-
let domains_json = if data.exclude_domains {
Value::Null
} else {
@@ -152,27 +148,26 @@ async fn sync(data: SyncData, headers: Headers, mut conn: DbConn) -> Json<Value>
"Policies": policies_json,
"Ciphers": ciphers_json,
"Domains": domains_json,
- "Sends": sends_json,
+ "Sends": Vec::<Value>::new(),
"unofficialServer": true,
"Object": "sync"
}))
}
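With Send support removed, the sync payload pins `Sends` to an empty array by serializing a typed empty `Vec`. What that serializes to, checked standalone:

    use serde_json::{json, Value};

    fn main() {
        let payload = json!({ "Sends": Vec::<Value>::new() });
        assert_eq!(payload.to_string(), r#"{"Sends":[]}"#);
    }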
#[get("/ciphers")]
-async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json<Value> {
- let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &mut conn).await;
+async fn get_ciphers(headers: Headers, conn: DbConn) -> Json<Value> {
+ let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &conn).await;
let cipher_sync_data =
- CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await;
+ CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &conn).await;
let mut ciphers_json = Vec::with_capacity(ciphers.len());
for c in ciphers {
ciphers_json.push(
c.to_json(
- &headers.host,
&headers.user.uuid,
Some(&cipher_sync_data),
CipherSyncType::User,
- &mut conn,
+ &conn,
)
.await,
);
@@ -186,14 +181,13 @@ async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json<Value> {
}
#[get("/ciphers/<uuid>")]
-async fn get_cipher(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
- let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await {
- Some(cipher) => cipher,
- None => err!("Cipher doesn't exist"),
+async fn get_cipher(uuid: &str, headers: Headers, conn: DbConn) -> JsonResult {
+ let Some(cipher) = Cipher::find_by_uuid(uuid, &conn).await else {
+ err!("Cipher doesn't exist")
};
if !cipher
- .is_accessible_to_user(&headers.user.uuid, &mut conn)
+ .is_accessible_to_user(&headers.user.uuid, &conn)
.await
{
err!("Cipher is not owned by user")
@@ -201,13 +195,7 @@ async fn get_cipher(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResul
Ok(Json(
cipher
- .to_json(
- &headers.host,
- &headers.user.uuid,
- None,
- CipherSyncType::User,
- &mut conn,
- )
+ .to_json(&headers.user.uuid, None, CipherSyncType::User, &conn)
.await,
))
}
@@ -224,7 +212,7 @@ async fn get_cipher_details(uuid: &str, headers: Headers, conn: DbConn) -> JsonR
}
#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
+#[allow(dead_code, non_snake_case)]
pub struct CipherData {
// Id is optional as it is included only in bulk share
pub Id: Option<String>,
@@ -232,37 +220,24 @@ pub struct CipherData {
FolderId: Option<String>,
// TODO: Some of these might appear all the time, no need for Option
OrganizationId: Option<String>,
-
Key: Option<String>,
-
- /*
- Login = 1,
- SecureNote = 2,
- Card = 3,
- Identity = 4
- */
pub Type: i32,
pub Name: String,
pub Notes: Option<String>,
Fields: Option<Value>,
-
// Only one of these should exist, depending on type
Login: Option<Value>,
SecureNote: Option<Value>,
Card: Option<Value>,
Identity: Option<Value>,
-
Favorite: Option<bool>,
Reprompt: Option<i32>,
-
PasswordHistory: Option<Value>,
-
// These are used during key rotation
// 'Attachments' is unused, contains map of {id: filename}
#[serde(rename = "Attachments")]
_Attachments: Option<Value>,
Attachments2: Option<HashMap<String, Attachments2Data>>,
-
// The revision datetime (in ISO 8601 format) of the client's local copy
// of the cipher. This is used to prevent a client from updating a cipher
// when it doesn't have the latest version, as that can result in data
@@ -274,14 +249,14 @@ pub struct CipherData {
#[derive(Deserialize, Debug)]
#[allow(non_snake_case)]
-pub struct PartialCipherData {
+struct PartialCipherData {
FolderId: Option<String>,
Favorite: bool,
}
#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
-pub struct Attachments2Data {
+#[allow(dead_code, non_snake_case)]
+struct Attachments2Data {
FileName: String,
Key: String,
}
@@ -304,7 +279,7 @@ async fn post_ciphers_admin(
async fn post_ciphers_create(
data: JsonUpcase<ShareCipherData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let mut data: ShareCipherData = data.into_inner().data;
@@ -318,11 +293,11 @@ async fn post_ciphers_create(
// This check is usually only needed in update_cipher_from_data(), but we
// need it here as well to avoid creating an empty cipher in the call to
// cipher.save() below.
- enforce_personal_ownership_policy(Some(&data.Cipher), &headers, &mut conn).await?;
+ enforce_personal_ownership_policy(Some(&data.Cipher), &headers, &conn).await?;
let mut cipher = Cipher::new(data.Cipher.Type, data.Cipher.Name.clone());
cipher.user_uuid = Some(headers.user.uuid.clone());
- cipher.save(&mut conn).await?;
+ cipher.save(&conn).await?;
// When cloning a cipher, the Bitwarden clients seem to set this field
// based on the cipher being cloned (when creating a new cipher, it's set
@@ -332,7 +307,7 @@ async fn post_ciphers_create(
// or otherwise), we can just ignore this field entirely.
data.Cipher.LastKnownRevisionDate = None;
- share_cipher_by_uuid(&cipher.uuid, data, &headers, &mut conn, &nt).await
+ share_cipher_by_uuid(&cipher.uuid, data, &headers, &conn, &nt).await
}
/// Called when creating a new user-owned cipher.
@@ -340,7 +315,7 @@ async fn post_ciphers_create(
async fn post_ciphers(
data: JsonUpcase<CipherData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let mut data: CipherData = data.into_inner().data;
@@ -357,7 +332,7 @@ async fn post_ciphers(
data,
&headers,
false,
- &mut conn,
+ &conn,
&nt,
UpdateType::SyncCipherCreate,
)
@@ -365,13 +340,7 @@ async fn post_ciphers(
Ok(Json(
cipher
- .to_json(
- &headers.host,
- &headers.user.uuid,
- None,
- CipherSyncType::User,
- &mut conn,
- )
+ .to_json(&headers.user.uuid, None, CipherSyncType::User, &conn)
.await,
))
}
@@ -386,7 +355,7 @@ async fn post_ciphers(
async fn enforce_personal_ownership_policy(
data: Option<&CipherData>,
headers: &Headers,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> EmptyResult {
if data.is_none() || data.unwrap().OrganizationId.is_none() {
let user_uuid = &headers.user.uuid;
@@ -403,7 +372,7 @@ pub async fn update_cipher_from_data(
data: CipherData,
headers: &Headers,
shared_to_collection: bool,
- conn: &mut DbConn,
+ conn: &DbConn,
nt: &Notify<'_>,
ut: UpdateType,
) -> EmptyResult {
@@ -469,36 +438,6 @@ pub async fn update_cipher_from_data(
None => err!("Folder doesn't exist"),
}
}
-
- // Modify attachments name and keys when rotating
- if let Some(attachments) = data.Attachments2 {
- for (id, attachment) in attachments {
- let mut saved_att = match Attachment::find_by_id(&id, conn).await {
- Some(att) => att,
- None => {
- // Warn and continue here.
- // A missing attachment means it was removed via another client.
- // Also, the desktop client supports removing attachments and saving an update afterwards.
- // Bitwarden itself ignores these mismatches server-side.
- warn!("Attachment {id} doesn't exist");
- continue;
- }
- };
-
- if saved_att.cipher_uuid != cipher.uuid {
- // Warn and break here, since cloning ciphers provides attachment data but the attachments themselves are not cloned.
- // If we error out here it will break the whole cloning and cause empty ciphers to appear.
- warn!("Attachment is not owned by the cipher");
- break;
- }
-
- saved_att.akey = Some(attachment.Key);
- saved_att.file_name = attachment.FileName;
-
- saved_att.save(conn).await?;
- }
- }
-
// Clean up cipher data, like removing the 'Response' key.
// This key is generated somewhere in the JavaScript client, so there is no way for us to fix this here.
// Also, upstream only retrieves the keys it actually wants to store, and thus skips the 'Response' key.
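For the cleanup the comment above describes, stripping a client-injected key from a `serde_json::Value` looks roughly like this (a sketch, not the codebase's exact code):

    use serde_json::json;

    fn main() {
        let mut data = json!({ "Response": true, "Name": "item" });
        if let Some(obj) = data.as_object_mut() {
            obj.remove("Response");
        }
        assert_eq!(data, json!({ "Name": "item" }));
    }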
@@ -589,10 +528,10 @@ struct RelationsData {
async fn post_ciphers_import(
data: JsonUpcase<ImportData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
- enforce_personal_ownership_policy(None, &headers, &mut conn).await?;
+ enforce_personal_ownership_policy(None, &headers, &conn).await?;
let data: ImportData = data.into_inner().data;
@@ -604,9 +543,9 @@ async fn post_ciphers_import(
// Read and create the folders
let mut folders: Vec<_> = Vec::new();
- for folder in data.Folders.into_iter() {
+ for folder in data.Folders {
let mut new_folder = Folder::new(headers.user.uuid.clone(), folder.Name);
- new_folder.save(&mut conn).await?;
+ new_folder.save(&conn).await?;
folders.push(new_folder);
}
@@ -629,7 +568,7 @@ async fn post_ciphers_import(
cipher_data,
&headers,
false,
- &mut conn,
+ &conn,
&nt,
UpdateType::None,
)
@@ -637,7 +576,7 @@ async fn post_ciphers_import(
}
let mut user = headers.user;
- user.update_revision(&mut conn).await?;
+ user.update_revision(&conn).await?;
nt.send_user_update(UpdateType::SyncVault, &user).await;
Ok(())
@@ -682,14 +621,13 @@ async fn put_cipher(
uuid: &str,
data: JsonUpcase<CipherData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let data: CipherData = data.into_inner().data;
- let mut cipher = match Cipher::find_by_uuid(uuid, &mut conn).await {
- Some(cipher) => cipher,
- None => err!("Cipher doesn't exist"),
+ let Some(mut cipher) = Cipher::find_by_uuid(uuid, &conn).await else {
+ err!("Cipher doesn't exist")
};
// TODO: Check if only the folder ID or favorite status is being changed.
@@ -698,7 +636,7 @@ async fn put_cipher(
// Interestingly, upstream Bitwarden doesn't properly handle this either.
if !cipher
- .is_write_accessible_to_user(&headers.user.uuid, &mut conn)
+ .is_write_accessible_to_user(&headers.user.uuid, &conn)
.await
{
err!("Cipher is not write accessible")
@@ -709,7 +647,7 @@ async fn put_cipher(
data,
&headers,
false,
- &mut conn,
+ &conn,
&nt,
UpdateType::SyncCipherUpdate,
)
@@ -717,13 +655,7 @@ async fn put_cipher(
Ok(Json(
cipher
- .to_json(
- &headers.host,
- &headers.user.uuid,
- None,
- CipherSyncType::User,
- &mut conn,
- )
+ .to_json(&headers.user.uuid, None, CipherSyncType::User, &conn)
.await,
))
}
@@ -744,17 +676,16 @@ async fn put_cipher_partial(
uuid: &str,
data: JsonUpcase<PartialCipherData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: PartialCipherData = data.into_inner().data;
- let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await {
- Some(cipher) => cipher,
- None => err!("Cipher doesn't exist"),
+ let Some(cipher) = Cipher::find_by_uuid(uuid, &conn).await else {
+ err!("Cipher doesn't exist")
};
if let Some(ref folder_id) = data.FolderId {
- match Folder::find_by_uuid(folder_id, &mut conn).await {
+ match Folder::find_by_uuid(folder_id, &conn).await {
Some(folder) => {
if folder.user_uuid != headers.user.uuid {
err!("Folder is not owned by user")
@@ -766,22 +697,16 @@ async fn put_cipher_partial(
// Move cipher
cipher
- .move_to_folder(data.FolderId.clone(), &headers.user.uuid, &mut conn)
+ .move_to_folder(data.FolderId.clone(), &headers.user.uuid, &conn)
.await?;
// Update favorite
cipher
- .set_favorite(Some(data.Favorite), &headers.user.uuid, &mut conn)
+ .set_favorite(Some(data.Favorite), &headers.user.uuid, &conn)
.await?;
Ok(Json(
cipher
- .to_json(
- &headers.host,
- &headers.user.uuid,
- None,
- CipherSyncType::User,
- &mut conn,
- )
+ .to_json(&headers.user.uuid, None, CipherSyncType::User, &conn)
.await,
))
}
@@ -830,18 +755,17 @@ async fn post_collections_admin(
uuid: &str,
data: JsonUpcase<CollectionsAdminData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
let data: CollectionsAdminData = data.into_inner().data;
- let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await {
- Some(cipher) => cipher,
- None => err!("Cipher doesn't exist"),
+ let Some(cipher) = Cipher::find_by_uuid(uuid, &conn).await else {
+ err!("Cipher doesn't exist")
};
if !cipher
- .is_write_accessible_to_user(&headers.user.uuid, &mut conn)
+ .is_write_accessible_to_user(&headers.user.uuid, &conn)
.await
{
err!("Cipher is not write accessible")
@@ -849,26 +773,26 @@ async fn post_collections_admin(
let posted_collections: HashSet<String> = data.CollectionIds.iter().cloned().collect();
let current_collections: HashSet<String> = cipher
- .get_collections(headers.user.uuid.clone(), &mut conn)
+ .get_collections(headers.user.uuid.clone(), &conn)
.await
.iter()
.cloned()
.collect();
for collection in posted_collections.symmetric_difference(&current_collections) {
- match Collection::find_by_uuid(collection, &mut conn).await {
+ match Collection::find_by_uuid(collection, &conn).await {
None => err!("Invalid collection ID provided"),
Some(collection) => {
if collection
- .is_writable_by_user(&headers.user.uuid, &mut conn)
+ .is_writable_by_user(&headers.user.uuid, &conn)
.await
{
if posted_collections.contains(&collection.uuid) {
// Add to collection
- CollectionCipher::save(&cipher.uuid, &collection.uuid, &mut conn).await?;
+ CollectionCipher::save(&cipher.uuid, &collection.uuid, &conn).await?;
} else {
// Remove from collection
- CollectionCipher::delete(&cipher.uuid, &collection.uuid, &mut conn).await?;
+ CollectionCipher::delete(&cipher.uuid, &collection.uuid, &conn).await?;
}
} else {
err!("No rights to modify the collection")
@@ -880,10 +804,10 @@ async fn post_collections_admin(
nt.send_cipher_update(
UpdateType::SyncCipherUpdate,
&cipher,
- &cipher.update_users_revision(&mut conn).await,
+ &cipher.update_users_revision(&conn).await,
&headers.device.uuid,
Some(Vec::from_iter(posted_collections)),
- &mut conn,
+ &conn,
)
.await;
Ok(())
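The loop above visits `posted_collections.symmetric_difference(&current_collections)`, i.e. exactly the collections whose membership changed in either direction, so unchanged links are never touched. Standalone illustration:

    use std::collections::HashSet;

    fn main() {
        let posted: HashSet<&str> = ["a", "b"].into_iter().collect();
        let current: HashSet<&str> = ["b", "c"].into_iter().collect();
        let changed: HashSet<&str> =
            posted.symmetric_difference(&current).copied().collect();
        // "a" must be added, "c" must be removed, "b" is left alone.
        assert_eq!(changed, HashSet::from(["a", "c"]));
    }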
@@ -901,12 +825,12 @@ async fn post_cipher_share(
uuid: &str,
data: JsonUpcase<ShareCipherData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let data: ShareCipherData = data.into_inner().data;
- share_cipher_by_uuid(uuid, data, &headers, &mut conn, &nt).await
+ share_cipher_by_uuid(uuid, data, &headers, &conn, &nt).await
}
#[put("/ciphers/<uuid>/share", data = "<data>")]
@@ -914,12 +838,11 @@ async fn put_cipher_share(
uuid: &str,
data: JsonUpcase<ShareCipherData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let data: ShareCipherData = data.into_inner().data;
-
- share_cipher_by_uuid(uuid, data, &headers, &mut conn, &nt).await
+ share_cipher_by_uuid(uuid, data, &headers, &conn, &nt).await
}
#[derive(Deserialize)]
@@ -933,25 +856,20 @@ struct ShareSelectedCipherData {
async fn put_cipher_share_selected(
data: JsonUpcase<ShareSelectedCipherData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
let mut data: ShareSelectedCipherData = data.into_inner().data;
- let mut cipher_ids: Vec<String> = Vec::new();
-
if data.Ciphers.is_empty() {
err!("You must select at least one cipher.")
}
-
if data.CollectionIds.is_empty() {
err!("You must select at least one collection.")
}
-
- for cipher in data.Ciphers.iter() {
- match cipher.Id {
- Some(ref id) => cipher_ids.push(id.to_string()),
- None => err!("Request missing ids field"),
- };
+ for cipher in &data.Ciphers {
+ if cipher.Id.is_none() {
+ err!("Request missing ids field");
+ }
}
while let Some(cipher) = data.Ciphers.pop() {
@@ -961,9 +879,7 @@ async fn put_cipher_share_selected(
};
match shared_cipher_data.Cipher.Id.take() {
- Some(id) => {
- share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &nt).await?
- }
+ Some(id) => share_cipher_by_uuid(&id, shared_cipher_data, &headers, &conn, &nt).await?,
None => err!("Request missing ids field"),
};
}
@@ -975,7 +891,7 @@ async fn share_cipher_by_uuid(
uuid: &str,
data: ShareCipherData,
headers: &Headers,
- conn: &mut DbConn,
+ conn: &DbConn,
nt: &Notify<'_>,
) -> JsonResult {
let mut cipher = match Cipher::find_by_uuid(uuid, conn).await {
@@ -1033,13 +949,7 @@ async fn share_cipher_by_uuid(
Ok(Json(
cipher
- .to_json(
- &headers.host,
- &headers.user.uuid,
- None,
- CipherSyncType::User,
- conn,
- )
+ .to_json(&headers.user.uuid, None, CipherSyncType::User, conn)
.await,
))
}
@@ -1050,36 +960,14 @@ async fn share_cipher_by_uuid(
/// Upstream added this v2 API to support direct download of attachments from
/// their object storage service. For self-hosted instances, it basically just
/// redirects to the same location as before the v2 API.
+#[allow(unused_variables)]
#[get("/ciphers/<uuid>/attachment/<attachment_id>")]
-async fn get_attachment(
- uuid: &str,
- attachment_id: &str,
- headers: Headers,
- mut conn: DbConn,
-) -> JsonResult {
- let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await {
- Some(cipher) => cipher,
- None => err!("Cipher doesn't exist"),
- };
-
- if !cipher
- .is_accessible_to_user(&headers.user.uuid, &mut conn)
- .await
- {
- err!("Cipher is not accessible")
- }
-
- match Attachment::find_by_id(attachment_id, &mut conn).await {
- Some(attachment) if uuid == attachment.cipher_uuid => {
- Ok(Json(attachment.to_json(&headers.host)))
- }
- Some(_) => err!("Attachment doesn't belong to cipher"),
- None => err!("Attachment doesn't exist"),
- }
+fn get_attachment(uuid: &str, attachment_id: &str, _headers: Headers, _conn: DbConn) -> JsonResult {
+ err!("Attachments are disabled")
}
#[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[allow(dead_code, non_snake_case)]
struct AttachmentRequestData {
Key: String,
FileName: String,
@@ -1087,413 +975,191 @@ struct AttachmentRequestData {
AdminRequest: Option<bool>, // true when attaching from an org vault view
}
-enum FileUploadType {
- Direct = 0,
- // Azure = 1, // only used upstream
-}
-
/// v2 API for creating an attachment associated with a cipher.
/// This redirects the client to the API it should use to upload the attachment.
/// For upstream's cloud-hosted service, it's an Azure object storage API.
/// For self-hosted instances, it's another API on the local instance.
+#[allow(unused_variables)]
#[post("/ciphers/<uuid>/attachment/v2", data = "<data>")]
-async fn post_attachment_v2(
+fn post_attachment_v2(
uuid: &str,
data: JsonUpcase<AttachmentRequestData>,
- headers: Headers,
- mut conn: DbConn,
+ _headers: Headers,
+ _conn: DbConn,
) -> JsonResult {
- let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await {
- Some(cipher) => cipher,
- None => err!("Cipher doesn't exist"),
- };
-
- if !cipher
- .is_write_accessible_to_user(&headers.user.uuid, &mut conn)
- .await
- {
- err!("Cipher is not write accessible")
- }
-
- let attachment_id = crypto::generate_attachment_id();
- let data: AttachmentRequestData = data.into_inner().data;
- let attachment = Attachment::new(
- attachment_id.clone(),
- cipher.uuid.clone(),
- data.FileName,
- data.FileSize,
- Some(data.Key),
- );
- attachment
- .save(&mut conn)
- .await
- .expect("Error saving attachment");
-
- let url = format!("/ciphers/{}/attachment/{}", cipher.uuid, attachment_id);
- let response_key = match data.AdminRequest {
- Some(b) if b => "CipherMiniResponse",
- _ => "CipherResponse",
- };
-
- Ok(Json(json!({ // AttachmentUploadDataResponseModel
- "Object": "attachment-fileUpload",
- "AttachmentId": attachment_id,
- "Url": url,
- "FileUploadType": FileUploadType::Direct as i32,
- response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await,
- })))
+ err!("Attachments are disabled")
}
+#[allow(dead_code)]
#[derive(FromForm)]
struct UploadData<'f> {
key: Option<String>,
data: TempFile<'f>,
}
-/// Saves the data content of an attachment to a file. This is common code
-/// shared between the v2 and legacy attachment APIs.
-///
-/// When used with the legacy API, this function is responsible for creating
-/// the attachment database record, so `attachment` is None.
-///
-/// When used with the v2 API, post_attachment_v2() has already created the
-/// database record, which is passed in as `attachment`.
-#[allow(clippy::cast_lossless)]
-async fn save_attachment(
- mut attachment: Option<Attachment>,
- cipher_uuid: &str,
- data: Form<UploadData<'_>>,
- headers: &Headers,
- mut conn: DbConn,
- nt: Notify<'_>,
-) -> Result<(Cipher, DbConn), crate::error::Error> {
- let cipher = match Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
- Some(cipher) => cipher,
- None => err!("Cipher doesn't exist"),
- };
-
- if !cipher
- .is_write_accessible_to_user(&headers.user.uuid, &mut conn)
- .await
- {
- err!("Cipher is not write accessible")
- }
-
- // In the v2 API, the attachment record has already been created,
- // so the size limit needs to be adjusted to account for that.
- let size_adjust = match &attachment {
- None => 0, // Legacy API
- Some(a) => i64::from(a.file_size), // v2 API
- };
-
- let size_limit = if let Some(ref user_uuid) = cipher.user_uuid {
- match config::get_config().user_attachment_limit {
- Some(0) => err!("Attachments are disabled"),
- Some(limit_kb) => {
- let left = (limit_kb as i64 * 1024)
- - Attachment::size_by_user(user_uuid, &mut conn).await
- + size_adjust;
- if left <= 0 {
- err!("Attachment storage limit reached! Delete some attachments to free up space")
- }
- Some(left as u64)
- }
- None => None,
- }
- } else if let Some(ref org_uuid) = cipher.organization_uuid {
- match config::get_config().org_attachment_limit {
- Some(0) => err!("Attachments are disabled"),
- Some(limit_kb) => {
- let left = (limit_kb as i64 * 1024)
- - Attachment::size_by_org(org_uuid, &mut conn).await
- + size_adjust;
- if left <= 0 {
- err!("Attachment storage limit reached! Delete some attachments to free up space")
- }
- Some(left as u64)
- }
- None => None,
- }
- } else {
- err!("Cipher is neither owned by a user nor an organization");
- };
-
- let mut data = data.into_inner();
-
- if let Some(size_limit) = size_limit {
- if data.data.len() > size_limit {
- err!("Attachment storage limit exceeded with this file");
- }
- }
-
- let file_id = match &attachment {
- Some(attachment) => attachment.id.clone(), // v2 API
- None => crypto::generate_attachment_id(), // Legacy API
- };
-
- let folder_path = tokio::fs::canonicalize(Config::ATTACHMENTS_FOLDER)
- .await?
- .join(cipher_uuid);
- let file_path = folder_path.join(&file_id);
- tokio::fs::create_dir_all(&folder_path).await?;
-
- let size = data.data.len() as i32;
- if let Some(attachment) = &mut attachment {
- // v2 API
-
- // Check the actual size against the size initially provided by
- // the client. Upstream allows +/- 1 MiB deviation from this
- // size, but it's not clear when or why this is needed.
- const LEEWAY: i32 = 1024 * 1024; // 1 MiB
- let min_size = attachment.file_size - LEEWAY;
- let max_size = attachment.file_size + LEEWAY;
-
- if min_size <= size && size <= max_size {
- if size != attachment.file_size {
- // Update the attachment with the actual file size.
- attachment.file_size = size;
- attachment
- .save(&mut conn)
- .await
- .expect("Error updating attachment");
- }
- } else {
- attachment.delete(&mut conn).await.ok();
-
- err!(format!(
- "Attachment size mismatch (expected within [{min_size}, {max_size}], got {size})"
- ));
- }
- } else {
- // Legacy API
- let encrypted_filename = data
- .data
- .raw_name()
- .map(|s| s.dangerous_unsafe_unsanitized_raw().to_string());
-
- if encrypted_filename.is_none() {
- err!("No filename provided")
- }
- if data.key.is_none() {
- err!("No attachment key provided")
- }
- let attachment = Attachment::new(
- file_id,
- String::from(cipher_uuid),
- encrypted_filename.unwrap(),
- size,
- data.key,
- );
- attachment
- .save(&mut conn)
- .await
- .expect("Error saving attachment");
- }
-
- if let Err(_err) = data.data.persist_to(&file_path).await {
- data.data.move_copy_to(file_path).await?
- }
-
- nt.send_cipher_update(
- UpdateType::SyncCipherUpdate,
- &cipher,
- &cipher.update_users_revision(&mut conn).await,
- &headers.device.uuid,
- None,
- &mut conn,
- )
- .await;
- Ok((cipher, conn))
-}
-
/// v2 API for uploading the actual data content of an attachment.
/// This route needs a rank specified so that Rocket prioritizes the
/// /ciphers/<uuid>/attachment/v2 route, which would otherwise conflict
/// with this one.
+#[allow(unused_variables)]
#[post(
"/ciphers/<uuid>/attachment/<attachment_id>",
format = "multipart/form-data",
data = "<data>",
rank = 1
)]
-async fn post_attachment_v2_data(
+fn post_attachment_v2_data(
uuid: &str,
attachment_id: &str,
data: Form<UploadData<'_>>,
- headers: Headers,
- mut conn: DbConn,
- nt: Notify<'_>,
+ _headers: Headers,
+ _conn: DbConn,
+ _nt: Notify<'_>,
) -> EmptyResult {
- let attachment = match Attachment::find_by_id(attachment_id, &mut conn).await {
- Some(attachment) if uuid == attachment.cipher_uuid => Some(attachment),
- Some(_) => err!("Attachment doesn't belong to cipher"),
- None => err!("Attachment doesn't exist"),
- };
-
- save_attachment(attachment, uuid, data, &headers, conn, nt).await?;
-
- Ok(())
+ err!("Attachments are disabled")
}
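All of the attachment routes now reduce to a single `err!(...)`. `err!` here is this codebase's early-return error macro; a minimal stand-in showing the shape of such a macro (the real one builds a richer error type, so this is only illustrative):

    macro_rules! bail {
        ($msg:expr) => {
            return Err($msg.to_string())
        };
    }

    fn post_attachment_stub() -> Result<(), String> {
        bail!("Attachments are disabled");
    }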
/// Legacy API for creating an attachment associated with a cipher.
+#[allow(unused_variables)]
#[post(
"/ciphers/<uuid>/attachment",
format = "multipart/form-data",
data = "<data>"
)]
-async fn post_attachment(
+fn post_attachment(
uuid: &str,
data: Form<UploadData<'_>>,
- headers: Headers,
- conn: DbConn,
- nt: Notify<'_>,
+ _headers: Headers,
+ _conn: DbConn,
+ _nt: Notify<'_>,
) -> JsonResult {
- // Setting this as None signifies to save_attachment() that it should create
- // the attachment database record as well as saving the data to disk.
- let attachment = None;
-
- let (cipher, mut conn) = save_attachment(attachment, uuid, data, &headers, conn, nt).await?;
-
- Ok(Json(
- cipher
- .to_json(
- &headers.host,
- &headers.user.uuid,
- None,
- CipherSyncType::User,
- &mut conn,
- )
- .await,
- ))
+ err!("Attachments are disabled")
}
+#[allow(unused_variables)]
#[post(
"/ciphers/<uuid>/attachment-admin",
format = "multipart/form-data",
data = "<data>"
)]
-async fn post_attachment_admin(
+fn post_attachment_admin(
uuid: &str,
data: Form<UploadData<'_>>,
- headers: Headers,
- conn: DbConn,
- nt: Notify<'_>,
+ _headers: Headers,
+ _conn: DbConn,
+ _nt: Notify<'_>,
) -> JsonResult {
- post_attachment(uuid, data, headers, conn, nt).await
+ err!("Attachments are disabled")
}
+#[allow(unused_variables)]
#[post(
"/ciphers/<uuid>/attachment/<attachment_id>/share",
format = "multipart/form-data",
data = "<data>"
)]
-async fn post_attachment_share(
+fn post_attachment_share(
uuid: &str,
attachment_id: &str,
data: Form<UploadData<'_>>,
- headers: Headers,
- mut conn: DbConn,
- nt: Notify<'_>,
+ _headers: Headers,
+ _conn: DbConn,
+ _nt: Notify<'_>,
) -> JsonResult {
- _delete_cipher_attachment_by_id(uuid, attachment_id, &headers, &mut conn, &nt).await?;
- post_attachment(uuid, data, headers, conn, nt).await
+ err!("Attachments are disabled")
}
+#[allow(unused_variables)]
#[post("/ciphers/<uuid>/attachment/<attachment_id>/delete-admin")]
-async fn delete_attachment_post_admin(
+fn delete_attachment_post_admin(
uuid: &str,
attachment_id: &str,
- headers: Headers,
- conn: DbConn,
- nt: Notify<'_>,
+ _headers: Headers,
+ _conn: DbConn,
+ _nt: Notify<'_>,
) -> EmptyResult {
- delete_attachment(uuid, attachment_id, headers, conn, nt).await
+ err!("Attachments are disabled")
}
+#[allow(unused_variables)]
#[post("/ciphers/<uuid>/attachment/<attachment_id>/delete")]
-async fn delete_attachment_post(
+fn delete_attachment_post(
uuid: &str,
attachment_id: &str,
- headers: Headers,
- conn: DbConn,
- nt: Notify<'_>,
+ _headers: Headers,
+ _conn: DbConn,
+ _nt: Notify<'_>,
) -> EmptyResult {
- delete_attachment(uuid, attachment_id, headers, conn, nt).await
+ err!("Attachments are disabled")
}
+#[allow(unused_variables)]
#[delete("/ciphers/<uuid>/attachment/<attachment_id>")]
-async fn delete_attachment(
+fn delete_attachment(
uuid: &str,
attachment_id: &str,
- headers: Headers,
- mut conn: DbConn,
- nt: Notify<'_>,
+ _headers: Headers,
+ _conn: DbConn,
+ _nt: Notify<'_>,
) -> EmptyResult {
- _delete_cipher_attachment_by_id(uuid, attachment_id, &headers, &mut conn, &nt).await
+ err!("Attachments are disabled")
}
+#[allow(unused_variables)]
#[delete("/ciphers/<uuid>/attachment/<attachment_id>/admin")]
-async fn delete_attachment_admin(
+fn delete_attachment_admin(
uuid: &str,
attachment_id: &str,
- headers: Headers,
- mut conn: DbConn,
- nt: Notify<'_>,
+ _headers: Headers,
+ _conn: DbConn,
+ _nt: Notify<'_>,
) -> EmptyResult {
- _delete_cipher_attachment_by_id(uuid, attachment_id, &headers, &mut conn, &nt).await
+ err!("Attachments are disabled")
}
#[post("/ciphers/<uuid>/delete")]
async fn delete_cipher_post(
uuid: &str,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
- _delete_cipher_by_uuid(uuid, &headers, &mut conn, false, &nt).await
- // permanent delete
+ _delete_cipher_by_uuid(uuid, &headers, &conn, false, &nt).await
}
#[post("/ciphers/<uuid>/delete-admin")]
async fn delete_cipher_post_admin(
uuid: &str,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
- _delete_cipher_by_uuid(uuid, &headers, &mut conn, false, &nt).await
- // permanent delete
+ _delete_cipher_by_uuid(uuid, &headers, &conn, false, &nt).await
}
#[put("/ciphers/<uuid>/delete")]
async fn delete_cipher_put(
uuid: &str,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
- _delete_cipher_by_uuid(uuid, &headers, &mut conn, true, &nt).await
- // soft delete
+ _delete_cipher_by_uuid(uuid, &headers, &conn, true, &nt).await
}
#[put("/ciphers/<uuid>/delete-admin")]
async fn delete_cipher_put_admin(
uuid: &str,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
- _delete_cipher_by_uuid(uuid, &headers, &mut conn, true, &nt).await
+ _delete_cipher_by_uuid(uuid, &headers, &conn, true, &nt).await
}
#[delete("/ciphers/<uuid>")]
-async fn delete_cipher(
- uuid: &str,
- headers: Headers,
- mut conn: DbConn,
- nt: Notify<'_>,
-) -> EmptyResult {
- _delete_cipher_by_uuid(uuid, &headers, &mut conn, false, &nt).await
+async fn delete_cipher(uuid: &str, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+ _delete_cipher_by_uuid(uuid, &headers, &conn, false, &nt).await
// permanent delete
}
@@ -1501,11 +1167,10 @@ async fn delete_cipher(
async fn delete_cipher_admin(
uuid: &str,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
- _delete_cipher_by_uuid(uuid, &headers, &mut conn, false, &nt).await
- // permanent delete
+ _delete_cipher_by_uuid(uuid, &headers, &conn, false, &nt).await
}
#[delete("/ciphers", data = "<data>")]
@@ -1572,30 +1237,30 @@ async fn delete_cipher_selected_put_admin(
async fn restore_cipher_put(
uuid: &str,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
- _restore_cipher_by_uuid(uuid, &headers, &mut conn, &nt).await
+ _restore_cipher_by_uuid(uuid, &headers, &conn, &nt).await
}
#[put("/ciphers/<uuid>/restore-admin")]
async fn restore_cipher_put_admin(
uuid: &str,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
- _restore_cipher_by_uuid(uuid, &headers, &mut conn, &nt).await
+ _restore_cipher_by_uuid(uuid, &headers, &conn, &nt).await
}
#[put("/ciphers/restore", data = "<data>")]
async fn restore_cipher_selected(
data: JsonUpcase<Value>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
- _restore_multiple_ciphers(data, &headers, &mut conn, &nt).await
+ _restore_multiple_ciphers(data, &headers, &conn, &nt).await
}
#[derive(Deserialize)]
@@ -1609,14 +1274,14 @@ struct MoveCipherData {
async fn move_cipher_selected(
data: JsonUpcase<MoveCipherData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
let data = data.into_inner().data;
let user_uuid = headers.user.uuid;
if let Some(ref folder_id) = data.FolderId {
- match Folder::find_by_uuid(folder_id, &mut conn).await {
+ match Folder::find_by_uuid(folder_id, &conn).await {
Some(folder) => {
if folder.user_uuid != user_uuid {
err!("Folder is not owned by user")
@@ -1627,18 +1292,17 @@ async fn move_cipher_selected(
}
for uuid in data.Ids {
- let cipher = match Cipher::find_by_uuid(&uuid, &mut conn).await {
- Some(cipher) => cipher,
- None => err!("Cipher doesn't exist"),
+ let Some(cipher) = Cipher::find_by_uuid(&uuid, &conn).await else {
+ err!("Cipher doesn't exist")
};
- if !cipher.is_accessible_to_user(&user_uuid, &mut conn).await {
+ if !cipher.is_accessible_to_user(&user_uuid, &conn).await {
err!("Cipher is not accessible by user")
}
// Move cipher
cipher
- .move_to_folder(data.FolderId.clone(), &user_uuid, &mut conn)
+ .move_to_folder(data.FolderId.clone(), &user_uuid, &conn)
.await?;
nt.send_cipher_update(
@@ -1647,7 +1311,7 @@ async fn move_cipher_selected(
&[user_uuid.clone()],
&headers.device.uuid,
None,
- &mut conn,
+ &conn,
)
.await;
}
@@ -1676,62 +1340,56 @@ async fn delete_all(
organization: Option<OrganizationId>,
data: JsonUpcase<PasswordOrOtpData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
let data: PasswordOrOtpData = data.into_inner().data;
let mut user = headers.user;
- data.validate(&user, true, &mut conn).await?;
-
- match organization {
- Some(org_data) => {
- // Organization ID in query params, purging organization vault
- match UserOrganization::find_by_user_and_org(&user.uuid, &org_data.org_id, &mut conn)
- .await
- {
- None => err!("You don't have permission to purge the organization vault"),
- Some(user_org) => {
- if user_org.atype == UserOrgType::Owner {
- Cipher::delete_all_by_organization(&org_data.org_id, &mut conn).await?;
- nt.send_user_update(UpdateType::SyncVault, &user).await;
- Ok(())
- } else {
- err!("You don't have permission to purge the organization vault");
- }
+ data.validate(&user, true, &conn).await?;
+
+ if let Some(org_data) = organization {
+ // Organization ID in query params, purging organization vault
+ match UserOrganization::find_by_user_and_org(&user.uuid, &org_data.org_id, &conn).await {
+ None => err!("You don't have permission to purge the organization vault"),
+ Some(user_org) => {
+ if user_org.atype == UserOrgType::Owner {
+ Cipher::delete_all_by_organization(&org_data.org_id, &conn).await?;
+ nt.send_user_update(UpdateType::SyncVault, &user).await;
+ Ok(())
+ } else {
+ err!("You don't have permission to purge the organization vault");
}
}
}
- None => {
- // No organization ID in query params, purging user vault
- // Delete ciphers and their attachments
- for cipher in Cipher::find_owned_by_user(&user.uuid, &mut conn).await {
- cipher.delete(&mut conn).await?;
- }
+ } else {
+ // No organization ID in query params, purging user vault
+ // Delete ciphers and their attachments
+ for cipher in Cipher::find_owned_by_user(&user.uuid, &conn).await {
+ cipher.delete(&conn).await?;
+ }
- // Delete folders
- for f in Folder::find_by_user(&user.uuid, &mut conn).await {
- f.delete(&mut conn).await?;
- }
+ // Delete folders
+ for f in Folder::find_by_user(&user.uuid, &conn).await {
+ f.delete(&conn).await?;
+ }
- user.update_revision(&mut conn).await?;
- nt.send_user_update(UpdateType::SyncVault, &user).await;
+ user.update_revision(&conn).await?;
+ nt.send_user_update(UpdateType::SyncVault, &user).await;
- Ok(())
- }
+ Ok(())
}
}
async fn _delete_cipher_by_uuid(
uuid: &str,
headers: &Headers,
- conn: &mut DbConn,
+ conn: &DbConn,
soft_delete: bool,
nt: &Notify<'_>,
) -> EmptyResult {
- let mut cipher = match Cipher::find_by_uuid(uuid, conn).await {
- Some(cipher) => cipher,
- None => err!("Cipher doesn't exist"),
+ let Some(mut cipher) = Cipher::find_by_uuid(uuid, conn).await else {
+ err!("Cipher doesn't exist")
};
if !cipher
@@ -1771,7 +1429,7 @@ async fn _delete_cipher_by_uuid(
async fn _delete_multiple_ciphers(
data: JsonUpcase<Value>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
soft_delete: bool,
nt: Notify<'_>,
) -> EmptyResult {
@@ -1787,7 +1445,7 @@ async fn _delete_multiple_ciphers(
for uuid in uuids {
if let error @ Err(_) =
- _delete_cipher_by_uuid(uuid, &headers, &mut conn, soft_delete, &nt).await
+ _delete_cipher_by_uuid(uuid, &headers, &conn, soft_delete, &nt).await
{
return error;
};
@@ -1799,12 +1457,11 @@ async fn _delete_multiple_ciphers(
async fn _restore_cipher_by_uuid(
uuid: &str,
headers: &Headers,
- conn: &mut DbConn,
+ conn: &DbConn,
nt: &Notify<'_>,
) -> JsonResult {
- let mut cipher = match Cipher::find_by_uuid(uuid, conn).await {
- Some(cipher) => cipher,
- None => err!("Cipher doesn't exist"),
+ let Some(mut cipher) = Cipher::find_by_uuid(uuid, conn).await else {
+ err!("Cipher doesn't exist")
};
if !cipher
@@ -1828,13 +1485,7 @@ async fn _restore_cipher_by_uuid(
.await;
Ok(Json(
cipher
- .to_json(
- &headers.host,
- &headers.user.uuid,
- None,
- CipherSyncType::User,
- conn,
- )
+ .to_json(&headers.user.uuid, None, CipherSyncType::User, conn)
.await,
))
}
@@ -1842,7 +1493,7 @@ async fn _restore_cipher_by_uuid(
async fn _restore_multiple_ciphers(
data: JsonUpcase<Value>,
headers: &Headers,
- conn: &mut DbConn,
+ conn: &DbConn,
nt: &Notify<'_>,
) -> JsonResult {
let data: Value = data.into_inner().data;
@@ -1869,62 +1520,16 @@ async fn _restore_multiple_ciphers(
"ContinuationToken": null
})))
}
-
-async fn _delete_cipher_attachment_by_id(
- uuid: &str,
- attachment_id: &str,
- headers: &Headers,
- conn: &mut DbConn,
- nt: &Notify<'_>,
-) -> EmptyResult {
- let attachment = match Attachment::find_by_id(attachment_id, conn).await {
- Some(attachment) => attachment,
- None => err!("Attachment doesn't exist"),
- };
-
- if attachment.cipher_uuid != uuid {
- err!("Attachment from other cipher")
- }
-
- let cipher = match Cipher::find_by_uuid(uuid, conn).await {
- Some(cipher) => cipher,
- None => err!("Cipher doesn't exist"),
- };
-
- if !cipher
- .is_write_accessible_to_user(&headers.user.uuid, conn)
- .await
- {
- err!("Cipher cannot be deleted by user")
- }
-
- // Delete attachment
- attachment.delete(conn).await?;
- nt.send_cipher_update(
- UpdateType::SyncCipherUpdate,
- &cipher,
- &cipher.update_users_revision(conn).await,
- &headers.device.uuid,
- None,
- conn,
- )
- .await;
- Ok(())
-}
-
/// Holds all the data necessary to speed up a full sync of all the ciphers.
/// It can be used during the `Cipher::to_json()` call.
/// It prevents the so-called N+1 SQL issue by running just a few queries that gather all the data needed.
/// It will not noticeably speed up a single `Cipher::to_json()` call, so it is better not to use it for those calls.
pub struct CipherSyncData {
- pub cipher_attachments: HashMap<String, Vec<Attachment>>,
pub cipher_folders: HashMap<String, String>,
pub cipher_favorites: HashSet<String>,
pub cipher_collections: HashMap<String, Vec<String>>,
pub user_organizations: HashMap<String, UserOrganization>,
pub user_collections: HashMap<String, CollectionUser>,
- pub user_collections_groups: HashMap<String, CollectionGroup>,
- pub user_group_full_access_for_organizations: HashSet<String>,
}
#[derive(Eq, PartialEq)]
@@ -1934,7 +1539,7 @@ pub enum CipherSyncType {
}
impl CipherSyncData {
- pub async fn new(user_uuid: &str, sync_type: CipherSyncType, conn: &mut DbConn) -> Self {
+ pub async fn new(user_uuid: &str, sync_type: CipherSyncType, conn: &DbConn) -> Self {
let cipher_folders: HashMap<String, String>;
let cipher_favorites: HashSet<String>;
match sync_type {
@@ -1959,20 +1564,6 @@ impl CipherSyncData {
cipher_favorites = HashSet::with_capacity(0);
}
}
-
- // Generate a list of Cipher UUID's containing a Vec with one or more Attachment records
- let user_org_uuids = UserOrganization::get_org_uuid_by_user(user_uuid, conn).await;
- let attachments =
- Attachment::find_all_by_user_and_orgs(user_uuid, &user_org_uuids, conn).await;
- let mut cipher_attachments: HashMap<String, Vec<Attachment>> =
- HashMap::with_capacity(attachments.len());
- for attachment in attachments {
- cipher_attachments
- .entry(attachment.cipher_uuid.clone())
- .or_default()
- .push(attachment);
- }
-
// Generate a HashMap with the Cipher UUID as key and one or more Collection UUIDs
let user_cipher_collections =
Cipher::get_collections_with_cipher_by_user(user_uuid.to_string(), conn).await;
@@ -2000,33 +1591,12 @@ impl CipherSyncData {
.into_iter()
.map(|uc| (uc.collection_uuid.clone(), uc))
.collect();
-
- // Generate a HashMap with the collections_uuid as key and the CollectionGroup record
- let user_collections_groups: HashMap<String, CollectionGroup> =
- CollectionGroup::find_by_user(user_uuid, conn)
- .await
- .into_iter()
- .map(|collection_group| {
- (collection_group.collections_uuid.clone(), collection_group)
- })
- .collect();
-
- // Get all organizations that the user has full access to via group assignment
- let user_group_full_access_for_organizations: HashSet<String> =
- Group::gather_user_organizations_full_access(user_uuid, conn)
- .await
- .into_iter()
- .collect();
-
Self {
- cipher_attachments,
cipher_folders,
cipher_favorites,
cipher_collections,
user_organizations,
user_collections,
- user_collections_groups,
- user_group_full_access_for_organizations,
}
}
}
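
The doc comment on `CipherSyncData` above describes the point of the struct: run a handful of bulk queries up front, index the results by cipher UUID, and consult those maps during each `Cipher::to_json()` call instead of issuing one query per cipher. A minimal self-contained sketch of that prefetch pattern (the `FolderRow` type and the sample data are illustrative, not types from this codebase):

    use std::collections::HashMap;

    // Hypothetical flat row, as a single bulk query might return it.
    struct FolderRow {
        cipher_uuid: String,
        folder_uuid: String,
    }

    // Fold one bulk query result into a map; lookups inside the per-cipher
    // serialization loop are then O(1) instead of one SQL round trip per
    // cipher (the N+1 problem).
    fn index_by_cipher(rows: Vec<FolderRow>) -> HashMap<String, String> {
        rows.into_iter()
            .map(|r| (r.cipher_uuid, r.folder_uuid))
            .collect()
    }

    fn main() {
        let rows = vec![FolderRow {
            cipher_uuid: "c1".into(),
            folder_uuid: "f1".into(),
        }];
        let cipher_folders = index_by_cipher(rows);
        // During the sync loop, each cipher does a map lookup rather than a query.
        assert_eq!(cipher_folders.get("c1").map(String::as_str), Some("f1"));
    }
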
diff --git a/src/api/core/emergency_access.rs b/src/api/core/emergency_access.rs
@@ -1,10 +1,9 @@
use crate::{
api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString},
auth::Headers,
- db::{models::*, DbConn},
+ db::DbConn,
};
-use rocket::{serde::json::Json, Route};
-use serde_json::Value;
+use rocket::Route;
pub fn routes() -> Vec<Route> {
routes![
@@ -188,48 +187,8 @@ fn password_emergency_access(
err!("Emergency access is not allowed.")
}
+#[allow(unused_variables)]
#[get("/emergency-access/<emer_id>/policies")]
-async fn policies_emergency_access(
- emer_id: &str,
- headers: Headers,
- mut conn: DbConn,
-) -> JsonResult {
- let requesting_user = headers.user;
- let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await {
- Some(emer) => emer,
- None => err!("Emergency access not valid."),
- };
-
- if !is_valid_request(
- &emergency_access,
- &requesting_user.uuid,
- EmergencyAccessType::Takeover,
- ) {
- err!("Emergency access not valid.")
- }
-
- let grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await {
- Some(user) => user,
- None => err!("Grantor user not found."),
- };
-
- let policies = OrgPolicy::find_confirmed_by_user(&grantor_user.uuid, &mut conn);
- let policies_json: Vec<Value> = policies.await.iter().map(OrgPolicy::to_json).collect();
-
- Ok(Json(json!({
- "Data": policies_json,
- "Object": "list",
- "ContinuationToken": null
- })))
-}
-
-fn is_valid_request(
- emergency_access: &EmergencyAccess,
- requesting_user_uuid: &str,
- requested_access_type: EmergencyAccessType,
-) -> bool {
- emergency_access.grantee_uuid.is_some()
- && emergency_access.grantee_uuid.as_ref().unwrap() == requesting_user_uuid
- && emergency_access.status == EmergencyAccessStatus::RecoveryApproved as i32
- && emergency_access.atype == requested_access_type as i32
+fn policies_emergency_access(emer_id: &str, _headers: Headers, _conn: DbConn) -> JsonResult {
+ err!("Emergency access not valid.")
}
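
As with the attachment routes above, the emergency-access endpoint is kept registered but reduced to a stub, so clients receive a well-formed error rather than a 404. A hedged sketch of that shape in plain Rust (`ApiError` and `EmptyResult` here are stand-ins for this codebase's error type and its `err!` macro):

    #[derive(Debug)]
    struct ApiError(&'static str);

    type EmptyResult = Result<(), ApiError>;

    // The handler keeps its full signature so routing still matches, but every
    // call fails fast with a stable, user-facing message.
    fn policies_emergency_access(_emer_id: &str) -> EmptyResult {
        Err(ApiError("Emergency access not valid."))
    }

    fn main() {
        assert!(policies_emergency_access("some-id").is_err());
    }
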
diff --git a/src/api/core/events.rs b/src/api/core/events.rs
@@ -1,7 +1,7 @@
use crate::{
api::{EmptyResult, JsonResult, JsonUpcaseVec},
auth::{AdminHeaders, Headers},
- db::{models::Event, DbConn},
+ db::DbConn,
};
use rocket::{form::FromForm, serde::json::Json, Route};
use serde_json::Value;
@@ -22,7 +22,7 @@ struct EventRange {
}
// Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::unnecessary_wraps)]
#[get("/organizations/<org_id>/events?<data..>")]
fn get_org_events(
org_id: &str,
@@ -30,17 +30,14 @@ fn get_org_events(
_headers: AdminHeaders,
_conn: DbConn,
) -> JsonResult {
- // Return an empty vec when we org events are disabled.
- // This prevents client errors
- let events_json = Vec::new();
Ok(Json(json!({
- "Data": events_json,
+ "Data": Vec::<Value>::new(),
"Object": "list",
- "ContinuationToken": get_continuation_token(&events_json),
+ "ContinuationToken": None::<&str>,
})))
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::unnecessary_wraps)]
#[get("/ciphers/<cipher_id>/events?<data..>")]
fn get_cipher_events(
cipher_id: &str,
@@ -48,17 +45,14 @@ fn get_cipher_events(
_headers: Headers,
_conn: DbConn,
) -> JsonResult {
- // Return an empty vec when we org events are disabled.
- // This prevents client errors
- let events_json = Vec::new();
Ok(Json(json!({
- "Data": events_json,
+ "Data": Vec::<Value>::new(),
"Object": "list",
- "ContinuationToken": get_continuation_token(&events_json),
+ "ContinuationToken": None::<&str>,
})))
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::unnecessary_wraps)]
#[get("/organizations/<org_id>/users/<user_org_id>/events?<data..>")]
fn get_user_events(
org_id: &str,
@@ -67,30 +61,12 @@ fn get_user_events(
_headers: AdminHeaders,
_conn: DbConn,
) -> JsonResult {
- // Return an empty vec when we org events are disabled.
- // This prevents client errors
- let events_json: Vec<Value> = Vec::new();
Ok(Json(json!({
- "Data": events_json,
+ "Data": Vec::<Value>::new(),
"Object": "list",
- "ContinuationToken": get_continuation_token(&events_json),
+ "ContinuationToken": None::<&str>,
})))
}
-
-fn get_continuation_token(events_json: &Vec<Value>) -> Option<&str> {
- // When the length of the vec equals the max page_size there probably is more data
- // When it is less, then all events are loaded.
- if events_json.len() as i64 == Event::PAGE_SIZE {
- if let Some(last_event) = events_json.last() {
- last_event["date"].as_str()
- } else {
- None
- }
- } else {
- None
- }
-}
-
/// ###############################################################################################################
/// /events routes
pub fn main_routes() -> Vec<Route> {
@@ -104,7 +80,7 @@ struct EventCollection;
// Upstream:
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::unnecessary_wraps)]
#[post("/collect", format = "application/json", data = "<data>")]
fn post_events_collect(
data: JsonUpcaseVec<EventCollection>,
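
Swapping `get_continuation_token(&events_json)` for a literal `None::<&str>` works because `serde_json` serializes `None` as JSON `null`; the turbofish only pins the otherwise-unconstrained type parameter inside the `json!` macro. A quick standalone check of that behavior (requires only the `serde_json` crate):

    use serde_json::{json, Value};

    fn main() {
        // None serializes to null; ::<&str> just gives the generic Option a
        // concrete type, since nothing else in the macro constrains it.
        let body = json!({
            "Data": Vec::<Value>::new(),
            "Object": "list",
            "ContinuationToken": None::<&str>,
        });
        assert_eq!(body["ContinuationToken"], Value::Null);
        assert!(body["Data"].as_array().unwrap().is_empty());
    }
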
diff --git a/src/api/core/folders.rs b/src/api/core/folders.rs
@@ -1,19 +1,26 @@
-use rocket::serde::json::Json;
-use serde_json::Value;
-
use crate::{
api::{EmptyResult, JsonResult, JsonUpcase, Notify, UpdateType},
auth::Headers,
- db::{models::*, DbConn},
+ db::{models::Folder, DbConn},
};
+use rocket::serde::json::Json;
+use serde_json::Value;
pub fn routes() -> Vec<rocket::Route> {
- routes![get_folders, get_folder, post_folders, post_folder, put_folder, delete_folder_post, delete_folder,]
+ routes![
+ get_folders,
+ get_folder,
+ post_folders,
+ post_folder,
+ put_folder,
+ delete_folder_post,
+ delete_folder,
+ ]
}
#[get("/folders")]
-async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
- let folders = Folder::find_by_user(&headers.user.uuid, &mut conn).await;
+async fn get_folders(headers: Headers, conn: DbConn) -> Json<Value> {
+ let folders = Folder::find_by_user(&headers.user.uuid, &conn).await;
let folders_json: Vec<Value> = folders.iter().map(Folder::to_json).collect();
Json(json!({
@@ -24,10 +31,9 @@ async fn get_folders(headers: Headers, mut conn: DbConn) -> Json<Value> {
}
#[get("/folders/<uuid>")]
-async fn get_folder(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult {
- let folder = match Folder::find_by_uuid(uuid, &mut conn).await {
- Some(folder) => folder,
- _ => err!("Invalid folder"),
+async fn get_folder(uuid: &str, headers: Headers, conn: DbConn) -> JsonResult {
+ let Some(folder) = Folder::find_by_uuid(uuid, &conn).await else {
+ err!("Invalid folder")
};
if folder.user_uuid != headers.user.uuid {
@@ -44,13 +50,24 @@ pub struct FolderData {
}
#[post("/folders", data = "<data>")]
-async fn post_folders(data: JsonUpcase<FolderData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
+async fn post_folders(
+ data: JsonUpcase<FolderData>,
+ headers: Headers,
+ conn: DbConn,
+ nt: Notify<'_>,
+) -> JsonResult {
let data: FolderData = data.into_inner().data;
let mut folder = Folder::new(headers.user.uuid, data.Name);
- folder.save(&mut conn).await?;
- nt.send_folder_update(UpdateType::SyncFolderCreate, &folder, &headers.device.uuid, &mut conn).await;
+ folder.save(&conn).await?;
+ nt.send_folder_update(
+ UpdateType::SyncFolderCreate,
+ &folder,
+ &headers.device.uuid,
+ &conn,
+ )
+ .await;
Ok(Json(folder.to_json()))
}
@@ -71,14 +88,13 @@ async fn put_folder(
uuid: &str,
data: JsonUpcase<FolderData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> JsonResult {
let data: FolderData = data.into_inner().data;
- let mut folder = match Folder::find_by_uuid(uuid, &mut conn).await {
- Some(folder) => folder,
- _ => err!("Invalid folder"),
+ let Some(mut folder) = Folder::find_by_uuid(uuid, &conn).await else {
+ err!("Invalid folder")
};
if folder.user_uuid != headers.user.uuid {
@@ -87,22 +103,32 @@ async fn put_folder(
folder.name = data.Name;
- folder.save(&mut conn).await?;
- nt.send_folder_update(UpdateType::SyncFolderUpdate, &folder, &headers.device.uuid, &mut conn).await;
+ folder.save(&conn).await?;
+ nt.send_folder_update(
+ UpdateType::SyncFolderUpdate,
+ &folder,
+ &headers.device.uuid,
+ &conn,
+ )
+ .await;
Ok(Json(folder.to_json()))
}
#[post("/folders/<uuid>/delete")]
-async fn delete_folder_post(uuid: &str, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+async fn delete_folder_post(
+ uuid: &str,
+ headers: Headers,
+ conn: DbConn,
+ nt: Notify<'_>,
+) -> EmptyResult {
delete_folder(uuid, headers, conn, nt).await
}
#[delete("/folders/<uuid>")]
-async fn delete_folder(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
- let folder = match Folder::find_by_uuid(uuid, &mut conn).await {
- Some(folder) => folder,
- _ => err!("Invalid folder"),
+async fn delete_folder(uuid: &str, headers: Headers, conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+ let Some(folder) = Folder::find_by_uuid(uuid, &conn).await else {
+ err!("Invalid folder")
};
if folder.user_uuid != headers.user.uuid {
@@ -110,8 +136,14 @@ async fn delete_folder(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notif
}
// Delete the actual folder entry
- folder.delete(&mut conn).await?;
-
- nt.send_folder_update(UpdateType::SyncFolderDelete, &folder, &headers.device.uuid, &mut conn).await;
+ folder.delete(&conn).await?;
+
+ nt.send_folder_update(
+ UpdateType::SyncFolderDelete,
+ &folder,
+ &headers.device.uuid,
+ &conn,
+ )
+ .await;
Ok(())
}
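
Several hunks in this commit, like `get_folder` and `delete_folder` above, replace the `match Option { Some(x) => x, None => err!(..) }` idiom with `let ... else`, stabilized in Rust 1.65: the success value binds in place, and the `else` block must diverge (which is what `err!` expands to). A standalone illustration of the two equivalent shapes:

    fn find_folder(uuid: &str) -> Option<String> {
        (uuid == "known").then(|| "My folder".to_string())
    }

    // Before: an explicit match used only to unwrap-or-bail.
    fn lookup_match(uuid: &str) -> Result<String, &'static str> {
        let folder = match find_folder(uuid) {
            Some(folder) => folder,
            None => return Err("Invalid folder"),
        };
        Ok(folder)
    }

    // After: let-else binds the Some value directly; the else arm must
    // diverge via return, break, continue, or panic.
    fn lookup_let_else(uuid: &str) -> Result<String, &'static str> {
        let Some(folder) = find_folder(uuid) else {
            return Err("Invalid folder");
        };
        Ok(folder)
    }

    fn main() {
        assert_eq!(lookup_match("known"), lookup_let_else("known"));
        assert_eq!(lookup_match("nope"), lookup_let_else("nope"));
    }
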
diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs
@@ -99,7 +99,7 @@ struct EquivDomainData {
async fn post_eq_domains(
data: JsonUpcase<EquivDomainData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
_nt: Notify<'_>,
) -> JsonResult {
let data: EquivDomainData = data.into_inner().data;
@@ -113,7 +113,7 @@ async fn post_eq_domains(
user.excluded_globals = to_string(&excluded_globals).unwrap_or_else(|_| "[]".to_string());
user.equivalent_domains = to_string(&equivalent_domains).unwrap_or_else(|_| "[]".to_string());
- user.save(&mut conn).await?;
+ user.save(&conn).await?;
Ok(Json(json!({})))
}
@@ -144,7 +144,7 @@ pub fn now() -> Json<String> {
}
#[get("/version")]
-fn version() -> Json<&'static str> {
+const fn version() -> Json<&'static str> {
Json(crate::VERSION)
}
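
Promoting `version` to a `const fn` is possible because the body is plain struct construction: Rocket's `Json` is a tuple struct, so wrapping a `&'static str` involves no allocation, and the call becomes usable in const contexts. A minimal sketch with a stand-in newtype (the `Json` here is a local placeholder, not Rocket's type):

    // Stand-in for rocket::serde::json::Json: a one-field tuple struct can be
    // built inside a const fn because construction does no runtime work.
    struct Json<T>(T);

    const VERSION: &str = "1.0.0";

    const fn version() -> Json<&'static str> {
        Json(VERSION)
    }

    fn main() {
        // Callable at runtime like any fn, and evaluable at compile time too.
        const V: &str = version().0;
        assert_eq!(V, "1.0.0");
    }
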
diff --git a/src/api/core/organizations.rs b/src/api/core/organizations.rs
@@ -4,10 +4,15 @@ use crate::{
EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString,
PasswordOrOtpData, UpdateType,
},
- auth::{
- decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders,
+ auth::{AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
+ db::{
+ models::{
+ Cipher, Collection, CollectionCipher, CollectionUser, OrgPolicy, OrgPolicyErr,
+ OrgPolicyType, Organization, OrganizationApiKey, TwoFactor, User, UserOrgStatus,
+ UserOrgType, UserOrganization,
+ },
+ DbConn,
},
- db::{models::*, DbConn},
error::Error,
util::convert_json_key_lcase_first,
};
@@ -119,7 +124,7 @@ struct OrganizationUpdateData {
}
#[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[allow(dead_code, non_snake_case)]
struct NewCollectionData {
Name: String,
Groups: Vec<NewCollectionObjectData>,
@@ -159,13 +164,13 @@ async fn delete_organization(
org_id: &str,
data: JsonUpcase<PasswordOrOtpData>,
headers: OwnerHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
let data: PasswordOrOtpData = data.into_inner().data;
- data.validate(&headers.user, true, &mut conn).await?;
- match Organization::find_by_uuid(org_id, &mut conn).await {
+ data.validate(&headers.user, true, &conn).await?;
+ match Organization::find_by_uuid(org_id, &conn).await {
None => err!("Organization not found"),
- Some(org) => org.delete(&mut conn).await,
+ Some(org) => org.delete(&conn).await,
}
}
@@ -180,29 +185,29 @@ async fn post_delete_organization(
}
#[post("/organizations/<org_id>/leave")]
-async fn leave_organization(org_id: &str, headers: Headers, mut conn: DbConn) -> EmptyResult {
- match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await {
+async fn leave_organization(org_id: &str, headers: Headers, conn: DbConn) -> EmptyResult {
+ match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &conn).await {
None => err!("User not part of organization"),
Some(user_org) => {
if user_org.atype == UserOrgType::Owner
&& UserOrganization::count_confirmed_by_org_and_type(
org_id,
UserOrgType::Owner,
- &mut conn,
+ &conn,
)
.await
<= 1
{
err!("The last owner can't leave")
}
- user_org.delete(&mut conn).await
+ user_org.delete(&conn).await
}
}
}
#[get("/organizations/<org_id>")]
-async fn get_organization(org_id: &str, _headers: OwnerHeaders, mut conn: DbConn) -> JsonResult {
- match Organization::find_by_uuid(org_id, &mut conn).await {
+async fn get_organization(org_id: &str, _headers: OwnerHeaders, conn: DbConn) -> JsonResult {
+ match Organization::find_by_uuid(org_id, &conn).await {
Some(organization) => Ok(Json(organization.to_json())),
None => err!("Can't find organization details"),
}
@@ -223,25 +228,24 @@ async fn post_organization(
org_id: &str,
_headers: OwnerHeaders,
data: JsonUpcase<OrganizationUpdateData>,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: OrganizationUpdateData = data.into_inner().data;
- let mut org = match Organization::find_by_uuid(org_id, &mut conn).await {
- Some(organization) => organization,
- None => err!("Can't find organization details"),
+ let Some(mut org) = Organization::find_by_uuid(org_id, &conn).await else {
+ err!("Can't find organization details")
};
org.name = data.Name;
org.billing_email = data.BillingEmail;
- org.save(&mut conn).await?;
+ org.save(&conn).await?;
Ok(Json(org.to_json()))
}
// GET /api/collections?writeOnly=false
#[get("/collections")]
-async fn get_user_collections(headers: Headers, mut conn: DbConn) -> Json<Value> {
+async fn get_user_collections(headers: Headers, conn: DbConn) -> Json<Value> {
Json(json!({
"Data":
- Collection::find_by_user_uuid(headers.user.uuid.clone(), &mut conn).await
+ Collection::find_by_user_uuid(headers.user.uuid.clone(), &conn).await
.iter()
.map(Collection::to_json)
.collect::<Value>(),
@@ -254,10 +258,10 @@ async fn get_user_collections(headers: Headers, mut conn: DbConn) -> Json<Value>
async fn get_org_collections(
org_id: &str,
_headers: ManagerHeadersLoose,
- mut conn: DbConn,
+ conn: DbConn,
) -> Json<Value> {
Json(json!({
- "Data": _get_org_collections(org_id, &mut conn).await,
+ "Data": _get_org_collections(org_id, &conn).await,
"Object": "list",
"ContinuationToken": null,
}))
@@ -267,16 +271,16 @@ async fn get_org_collections(
async fn get_org_collections_details(
org_id: &str,
headers: ManagerHeadersLoose,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let mut data = Vec::new();
- let user_org =
- match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await {
- Some(u) => u,
- None => err!("User is not part of organization"),
- };
- let coll_users = CollectionUser::find_by_organization(org_id, &mut conn).await;
- for col in Collection::find_by_organization(org_id, &mut conn).await {
+ let Some(user_org) =
+ UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &conn).await
+ else {
+ err!("User is not part of organization")
+ };
+ let coll_users = CollectionUser::find_by_organization(org_id, &conn).await;
+ for col in Collection::find_by_organization(org_id, &conn).await {
let groups: Vec<Value> = Vec::new();
let mut assigned = false;
let users: Vec<Value> = coll_users
@@ -301,7 +305,7 @@ async fn get_org_collections_details(
json_object["Users"] = json!(users);
json_object["Groups"] = json!(groups);
json_object["Object"] = json!("collectionAccessDetails");
- data.push(json_object)
+ data.push(json_object);
}
Ok(Json(json!({
@@ -311,7 +315,7 @@ async fn get_org_collections_details(
})))
}
-async fn _get_org_collections(org_id: &str, conn: &mut DbConn) -> Value {
+async fn _get_org_collections(org_id: &str, conn: &DbConn) -> Value {
Collection::find_by_organization(org_id, conn)
.await
.iter()
@@ -324,31 +328,18 @@ async fn post_organization_collections(
org_id: &str,
headers: ManagerHeadersLoose,
data: JsonUpcase<NewCollectionData>,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: NewCollectionData = data.into_inner().data;
- let org = match Organization::find_by_uuid(org_id, &mut conn).await {
- Some(organization) => organization,
- None => err!("Can't find organization details"),
+ let Some(org) = Organization::find_by_uuid(org_id, &conn).await else {
+ err!("Can't find organization details")
};
let collection = Collection::new(org.uuid, data.Name, data.ExternalId);
- collection.save(&mut conn).await?;
- for group in data.Groups {
- CollectionGroup::new(
- collection.uuid.clone(),
- group.Id,
- group.ReadOnly,
- group.HidePasswords,
- )
- .save(&mut conn)
- .await?;
- }
-
+ collection.save(&conn).await?;
for user in data.Users {
- let org_user = match UserOrganization::find_by_uuid(&user.Id, &mut conn).await {
- Some(u) => u,
- None => err!("User is not part of organization"),
+ let Some(org_user) = UserOrganization::find_by_uuid(&user.Id, &conn).await else {
+ err!("User is not part of organization")
};
if org_user.access_all {
@@ -360,7 +351,7 @@ async fn post_organization_collections(
&collection.uuid,
user.ReadOnly,
user.HidePasswords,
- &mut conn,
+ &conn,
)
.await?;
}
@@ -371,7 +362,7 @@ async fn post_organization_collections(
&collection.uuid,
false,
false,
- &mut conn,
+ &conn,
)
.await?;
}
@@ -396,18 +387,16 @@ async fn post_organization_collection_update(
col_id: &str,
_headers: ManagerHeaders,
data: JsonUpcase<NewCollectionData>,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: NewCollectionData = data.into_inner().data;
- let org = match Organization::find_by_uuid(org_id, &mut conn).await {
- Some(organization) => organization,
- None => err!("Can't find organization details"),
+ let Some(org) = Organization::find_by_uuid(org_id, &conn).await else {
+ err!("Can't find organization details")
};
- let mut collection = match Collection::find_by_uuid(col_id, &mut conn).await {
- Some(collection) => collection,
- None => err!("Collection not found"),
+ let Some(mut collection) = Collection::find_by_uuid(col_id, &conn).await else {
+ err!("Collection not found")
};
if collection.org_uuid != org.uuid {
@@ -419,26 +408,11 @@ async fn post_organization_collection_update(
Some(external_id) if !external_id.trim().is_empty() => Some(external_id),
_ => None,
};
- collection.save(&mut conn).await?;
- CollectionGroup::delete_all_by_collection(col_id, &mut conn).await?;
-
- for group in data.Groups {
- CollectionGroup::new(
- String::from(col_id),
- group.Id,
- group.ReadOnly,
- group.HidePasswords,
- )
- .save(&mut conn)
- .await?;
- }
-
- CollectionUser::delete_all_by_collection(col_id, &mut conn).await?;
-
+ collection.save(&conn).await?;
+ CollectionUser::delete_all_by_collection(col_id, &conn).await?;
for user in data.Users {
- let org_user = match UserOrganization::find_by_uuid(&user.Id, &mut conn).await {
- Some(u) => u,
- None => err!("User is not part of organization"),
+ let Some(org_user) = UserOrganization::find_by_uuid(&user.Id, &conn).await else {
+ err!("User is not part of organization")
};
if org_user.access_all {
@@ -450,7 +424,7 @@ async fn post_organization_collection_update(
col_id,
user.ReadOnly,
user.HidePasswords,
- &mut conn,
+ &conn,
)
.await?;
}
@@ -464,9 +438,9 @@ async fn delete_organization_collection_user(
col_id: &str,
org_user_id: &str,
_headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
- let collection = match Collection::find_by_uuid(col_id, &mut conn).await {
+ let collection = match Collection::find_by_uuid(col_id, &conn).await {
None => err!("Collection not found"),
Some(collection) => {
if collection.org_uuid == org_id {
@@ -477,18 +451,18 @@ async fn delete_organization_collection_user(
}
};
- match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &mut conn).await {
+ match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &conn).await {
None => err!("User not found in organization"),
Some(user_org) => {
match CollectionUser::find_by_collection_and_user(
&collection.uuid,
&user_org.user_uuid,
- &mut conn,
+ &conn,
)
.await
{
None => err!("User not assigned to collection"),
- Some(col_user) => col_user.delete(&mut conn).await,
+ Some(col_user) => col_user.delete(&conn).await,
}
}
}
@@ -509,7 +483,7 @@ async fn _delete_organization_collection(
org_id: &str,
col_id: &str,
_headers: &ManagerHeaders,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> EmptyResult {
match Collection::find_by_uuid(col_id, conn).await {
None => err!("Collection not found"),
@@ -528,9 +502,9 @@ async fn delete_organization_collection(
org_id: &str,
col_id: &str,
headers: ManagerHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
- _delete_organization_collection(org_id, col_id, &headers, &mut conn).await
+ _delete_organization_collection(org_id, col_id, &headers, &conn).await
}
#[derive(Deserialize, Debug)]
@@ -549,9 +523,9 @@ async fn post_organization_collection_delete(
col_id: &str,
headers: ManagerHeaders,
_data: JsonUpcase<DeleteCollectionData>,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
- _delete_organization_collection(org_id, col_id, &headers, &mut conn).await
+ _delete_organization_collection(org_id, col_id, &headers, &conn).await
}
#[derive(Deserialize, Debug)]
@@ -566,7 +540,7 @@ async fn bulk_delete_organization_collections(
org_id: &str,
headers: ManagerHeadersLoose,
data: JsonUpcase<BulkCollectionIds>,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
let data: BulkCollectionIds = data.into_inner().data;
if org_id != data.OrganizationId {
@@ -575,10 +549,10 @@ async fn bulk_delete_organization_collections(
let collections = data.Ids;
- let headers = ManagerHeaders::from_loose(headers, &collections, &mut conn).await?;
+ let headers = ManagerHeaders::from_loose(headers, &collections, &conn).await?;
for col_id in collections {
- _delete_organization_collection(org_id, &col_id, &headers, &mut conn).await?
+ _delete_organization_collection(org_id, &col_id, &headers, &conn).await?;
}
Ok(())
}
@@ -588,29 +562,27 @@ async fn get_org_collection_detail(
org_id: &str,
coll_id: &str,
headers: ManagerHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
- match Collection::find_by_uuid_and_user(coll_id, headers.user.uuid.clone(), &mut conn).await {
+ match Collection::find_by_uuid_and_user(coll_id, headers.user.uuid.clone(), &conn).await {
None => err!("Collection not found"),
Some(collection) => {
if collection.org_uuid != org_id {
err!("Collection is not owned by organization")
}
- let user_org =
- match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn)
- .await
- {
- Some(u) => u,
- None => err!("User is not part of organization"),
- };
+ let Some(user_org) =
+ UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &conn).await
+ else {
+ err!("User is not part of organization")
+ };
let groups: Vec<Value> = Vec::new();
let mut assigned = false;
let users: Vec<Value> =
CollectionUser::find_by_collection_swap_user_uuid_with_org_user_uuid(
&collection.uuid,
- &mut conn,
+ &conn,
)
.await
.iter()
@@ -645,18 +617,18 @@ async fn get_collection_users(
org_id: &str,
coll_id: &str,
_headers: ManagerHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
// Get org and collection, check that collection is from org
- let collection = match Collection::find_by_uuid_and_org(coll_id, org_id, &mut conn).await {
+ let collection = match Collection::find_by_uuid_and_org(coll_id, org_id, &conn).await {
None => err!("Collection not found in Organization"),
Some(collection) => collection,
};
let mut user_list = Vec::new();
- for col_user in CollectionUser::find_by_collection(&collection.uuid, &mut conn).await {
+ for col_user in CollectionUser::find_by_collection(&collection.uuid, &conn).await {
user_list.push(
- UserOrganization::find_by_user_and_org(&col_user.user_uuid, org_id, &mut conn)
+ UserOrganization::find_by_user_and_org(&col_user.user_uuid, org_id, &conn)
.await
.unwrap()
.to_json_user_access_restrictions(&col_user),
@@ -672,10 +644,10 @@ async fn put_collection_users(
coll_id: &str,
data: JsonUpcaseVec<CollectionData>,
_headers: ManagerHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
// Get org and collection, check that collection is from org
- if Collection::find_by_uuid_and_org(coll_id, org_id, &mut conn)
+ if Collection::find_by_uuid_and_org(coll_id, org_id, &conn)
.await
.is_none()
{
@@ -683,27 +655,19 @@ async fn put_collection_users(
}
// Delete all the user-collections
- CollectionUser::delete_all_by_collection(coll_id, &mut conn).await?;
+ CollectionUser::delete_all_by_collection(coll_id, &conn).await?;
// And then add all the received ones (except if the user has access_all)
for d in data.iter().map(|d| &d.data) {
- let user = match UserOrganization::find_by_uuid(&d.Id, &mut conn).await {
- Some(u) => u,
- None => err!("User is not part of organization"),
+ let Some(user) = UserOrganization::find_by_uuid(&d.Id, &conn).await else {
+ err!("User is not part of organization")
};
if user.access_all {
continue;
}
- CollectionUser::save(
- &user.user_uuid,
- coll_id,
- d.ReadOnly,
- d.HidePasswords,
- &mut conn,
- )
- .await?;
+ CollectionUser::save(&user.user_uuid, coll_id, d.ReadOnly, d.HidePasswords, &conn).await?;
}
Ok(())
@@ -716,23 +680,21 @@ struct OrgIdData {
}
#[get("/ciphers/organization-details?<data..>")]
-async fn get_org_details(data: OrgIdData, headers: Headers, mut conn: DbConn) -> Json<Value> {
+async fn get_org_details(data: OrgIdData, headers: Headers, conn: DbConn) -> Json<Value> {
Json(json!({
- "Data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await,
+ "Data": _get_org_details(&data.organization_id, &headers.user.uuid, &conn).await,
"Object": "list",
"ContinuationToken": null,
}))
}
-async fn _get_org_details(org_id: &str, host: &str, user_uuid: &str, conn: &mut DbConn) -> Value {
+async fn _get_org_details(org_id: &str, user_uuid: &str, conn: &DbConn) -> Value {
let ciphers = Cipher::find_by_org(org_id, conn).await;
let cipher_sync_data = CipherSyncData::new(user_uuid, CipherSyncType::Organization, conn).await;
-
let mut ciphers_json = Vec::with_capacity(ciphers.len());
for c in ciphers {
ciphers_json.push(
c.to_json(
- host,
user_uuid,
Some(&cipher_sync_data),
CipherSyncType::Organization,
@@ -757,15 +719,15 @@ async fn get_org_users(
data: GetOrgUserData,
org_id: &str,
_headers: ManagerHeadersLoose,
- mut conn: DbConn,
+ conn: DbConn,
) -> Json<Value> {
let mut users_json = Vec::new();
- for u in UserOrganization::find_by_org(org_id, &mut conn).await {
+ for u in UserOrganization::find_by_org(org_id, &conn).await {
users_json.push(
u.to_json_user_details(
data.include_collections.unwrap_or(false),
data.include_groups.unwrap_or(false),
- &mut conn,
+ &conn,
)
.await,
);
@@ -783,11 +745,11 @@ async fn post_org_keys(
org_id: &str,
data: JsonUpcase<OrgKeyData>,
_headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: OrgKeyData = data.into_inner().data;
- let mut org = match Organization::find_by_uuid(org_id, &mut conn).await {
+ let mut org = match Organization::find_by_uuid(org_id, &conn).await {
Some(organization) => {
if organization.private_key.is_some() && organization.public_key.is_some() {
err!("Organization Keys already exist")
@@ -800,7 +762,7 @@ async fn post_org_keys(
org.private_key = Some(data.EncryptedPrivateKey);
org.public_key = Some(data.PublicKey);
- org.save(&mut conn).await?;
+ org.save(&conn).await?;
Ok(Json(json!({
"Object": "organizationKeys",
@@ -818,7 +780,7 @@ struct CollectionData {
}
#[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[allow(dead_code, non_snake_case)]
struct InviteData {
Emails: Vec<String>,
Groups: Vec<String>,
@@ -827,79 +789,15 @@ struct InviteData {
AccessAll: Option<bool>,
}
+#[allow(unused_variables)]
#[post("/organizations/<org_id>/users/invite", data = "<data>")]
-async fn send_invite(
+fn send_invite(
org_id: &str,
data: JsonUpcase<InviteData>,
- headers: AdminHeaders,
- mut conn: DbConn,
+ _headers: AdminHeaders,
+ _conn: DbConn,
) -> EmptyResult {
- let data: InviteData = data.into_inner().data;
-
- let new_type = match UserOrgType::from_str(&data.Type.into_string()) {
- Some(new_type) => new_type as i32,
- None => err!("Invalid type"),
- };
-
- if new_type != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
- err!("Only Owners can invite Managers, Admins or Owners")
- }
-
- for email in data.Emails.iter() {
- let email = email.to_lowercase();
- let mut user_org_status = UserOrgStatus::Invited as i32;
- let user = match User::find_by_mail(&email, &mut conn).await {
- None => {
- err!(format!("User does not exist: {email}"))
- }
- Some(user) => {
- if UserOrganization::find_by_user_and_org(&user.uuid, org_id, &mut conn)
- .await
- .is_some()
- {
- err!(format!("User already in organization: {email}"))
- } else {
- // automatically accept existing users if mail is disabled
- if !user.password_hash.is_empty() {
- user_org_status = UserOrgStatus::Accepted as i32;
- }
- user
- }
- }
- };
-
- let mut new_user = UserOrganization::new(user.uuid.clone(), String::from(org_id));
- let access_all = data.AccessAll.unwrap_or(false);
- new_user.access_all = access_all;
- new_user.atype = new_type;
- new_user.status = user_org_status;
-
- // If no accessAll, add the collections received
- if !access_all {
- for col in data.Collections.iter().flatten() {
- match Collection::find_by_uuid_and_org(&col.Id, org_id, &mut conn).await {
- None => err!("Collection not found in Organization"),
- Some(collection) => {
- CollectionUser::save(
- &user.uuid,
- &collection.uuid,
- col.ReadOnly,
- col.HidePasswords,
- &mut conn,
- )
- .await?;
- }
- }
- }
- }
- new_user.save(&mut conn).await?;
- for group in data.Groups.iter() {
- let mut group_entry = GroupUser::new(String::from(group), user.uuid.clone());
- group_entry.save(&mut conn).await?;
- }
- }
-
- Ok(())
+ err!("No more organizations are allowed.")
}
#[allow(unused_variables)]
#[post("/organizations/<org_id>/users/reinvite", data = "<data>")]
@@ -909,19 +807,8 @@ fn bulk_reinvite_user(
_headers: AdminHeaders,
_conn: DbConn,
) -> Json<Value> {
- let data: OrgBulkIds = data.into_inner().data;
- let mut bulk_response = Vec::new();
- for org_user_id in data.Ids {
- bulk_response.push(json!(
- {
- "Object": "OrganizationBulkConfirmResponseModel",
- "Id": org_user_id,
- "Error": format!("{:?}", crate::error::Error::new("Invitations are not allowed.", "Invitations are not allowed."))
- }
- ))
- }
Json(json!({
- "Data": bulk_response,
+ "Data": Vec::<Value>::new(),
"Object": "list",
"ContinuationToken": null
}))
@@ -939,178 +826,50 @@ fn reinvite_user(
}
#[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[allow(dead_code, non_snake_case)]
struct AcceptData {
Token: String,
ResetPasswordKey: Option<String>,
}
+#[allow(unused_variables)]
#[post("/organizations/<org_id>/users/<_org_user_id>/accept", data = "<data>")]
-async fn accept_invite(
+fn accept_invite(
org_id: &str,
_org_user_id: &str,
data: JsonUpcase<AcceptData>,
- mut conn: DbConn,
+ _conn: DbConn,
) -> EmptyResult {
- // The web-vault passes org_id and org_user_id in the URL, but we are just reading them from the JWT instead
- let data: AcceptData = data.into_inner().data;
- let claims = decode_invite(&data.Token)?;
-
- match User::find_by_mail(&claims.email, &mut conn).await {
- Some(_) => {
- Invitation::take(&claims.email, &mut conn).await;
-
- if let (Some(user_org), Some(org)) = (&claims.user_org_id, &claims.org_id) {
- let mut user_org =
- match UserOrganization::find_by_uuid_and_org(user_org, org, &mut conn).await {
- Some(user_org) => user_org,
- None => err!("Error accepting the invitation"),
- };
-
- if user_org.status != UserOrgStatus::Invited as i32 {
- err!("User already accepted the invitation")
- }
-
- let master_password_required =
- OrgPolicy::org_is_reset_password_auto_enroll(org, &mut conn).await;
- if data.ResetPasswordKey.is_none() && master_password_required {
- err!("Reset password key is required, but not provided.");
- }
-
- // This check is also done at accept_invite(), _confirm_invite, _activate_user(), edit_user(), admin::update_user_org_type
- // It returns different error messages per function.
- if user_org.atype < UserOrgType::Admin {
- match OrgPolicy::is_user_allowed(&user_org.user_uuid, org_id, false, &mut conn)
- .await
- {
- Ok(_) => {}
- Err(OrgPolicyErr::TwoFactorMissing) => {
- err!("You cannot join this organization until you enable two-step login on your user account");
- }
- Err(OrgPolicyErr::SingleOrgEnforced) => {
- err!("You cannot join this organization because you are a member of an organization which forbids it");
- }
- }
- }
-
- user_org.status = UserOrgStatus::Accepted as i32;
-
- if master_password_required {
- user_org.reset_password_key = data.ResetPasswordKey;
- }
-
- user_org.save(&mut conn).await?;
- }
- }
- None => err!("Invited user not found"),
- }
- Ok(())
+ err!("No more organizations are allowed.")
}
+#[allow(unused_variables)]
#[post("/organizations/<org_id>/users/confirm", data = "<data>")]
-async fn bulk_confirm_invite(
+fn bulk_confirm_invite(
org_id: &str,
data: JsonUpcase<Value>,
- headers: AdminHeaders,
- mut conn: DbConn,
- nt: Notify<'_>,
+ _headers: AdminHeaders,
+ _conn: DbConn,
+ _nt: Notify<'_>,
) -> Json<Value> {
- let data = data.into_inner().data;
-
- let mut bulk_response = Vec::new();
- match data["Keys"].as_array() {
- Some(keys) => {
- for invite in keys {
- let org_user_id = invite["Id"].as_str().unwrap_or_default();
- let user_key = invite["Key"].as_str().unwrap_or_default();
- let err_msg =
- match _confirm_invite(org_id, org_user_id, user_key, &headers, &mut conn, &nt)
- .await
- {
- Ok(_) => String::new(),
- Err(e) => format!("{e:?}"),
- };
-
- bulk_response.push(json!(
- {
- "Object": "OrganizationBulkConfirmResponseModel",
- "Id": org_user_id,
- "Error": err_msg
- }
- ));
- }
- }
- None => panic!("No keys to confirm"),
- }
-
Json(json!({
- "Data": bulk_response,
+ "Data": Vec::<Value>::new(),
"Object": "list",
"ContinuationToken": null
}))
}
+#[allow(unused_variables)]
#[post("/organizations/<org_id>/users/<org_user_id>/confirm", data = "<data>")]
-async fn confirm_invite(
+fn confirm_invite(
org_id: &str,
org_user_id: &str,
data: JsonUpcase<Value>,
- headers: AdminHeaders,
- mut conn: DbConn,
- nt: Notify<'_>,
-) -> EmptyResult {
- let data = data.into_inner().data;
- let user_key = data["Key"].as_str().unwrap_or_default();
- _confirm_invite(org_id, org_user_id, user_key, &headers, &mut conn, &nt).await
-}
-
-async fn _confirm_invite(
- org_id: &str,
- org_user_id: &str,
- key: &str,
- headers: &AdminHeaders,
- conn: &mut DbConn,
- nt: &Notify<'_>,
+ _headers: AdminHeaders,
+ _conn: DbConn,
+ _nt: Notify<'_>,
) -> EmptyResult {
- if key.is_empty() || org_user_id.is_empty() {
- err!("Key or UserId is not set, unable to process request");
- }
-
- let mut user_to_confirm =
- match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
- Some(user) => user,
- None => err!("The specified user isn't a member of the organization"),
- };
-
- if user_to_confirm.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
- err!("Only Owners can confirm Managers, Admins or Owners")
- }
-
- if user_to_confirm.status != UserOrgStatus::Accepted as i32 {
- err!("User in invalid state")
- }
-
- // This check is also done at accept_invite(), _confirm_invite, _activate_user(), edit_user(), admin::update_user_org_type
- // It returns different error messages per function.
- if user_to_confirm.atype < UserOrgType::Admin {
- match OrgPolicy::is_user_allowed(&user_to_confirm.user_uuid, org_id, true, conn).await {
- Ok(_) => {}
- Err(OrgPolicyErr::TwoFactorMissing) => {
- err!("You cannot confirm this user because it has no two-step login method activated");
- }
- Err(OrgPolicyErr::SingleOrgEnforced) => {
- err!("You cannot confirm this user because it is a member of an organization which forbids it");
- }
- }
- }
-
- user_to_confirm.status = UserOrgStatus::Confirmed as i32;
- user_to_confirm.akey = key.to_string();
- let save_result = user_to_confirm.save(conn).await;
- if let Some(user) = User::find_by_uuid(&user_to_confirm.user_uuid, conn).await {
- nt.send_user_update(UpdateType::SyncOrgKeys, &user).await;
- }
- save_result
+ err!("No more organizations are allowed.")
}
#[get("/organizations/<org_id>/users/<org_user_id>?<data..>")]
@@ -1119,11 +878,11 @@ async fn get_user(
org_user_id: &str,
data: GetOrgUserData,
_headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
- let user = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &mut conn).await {
- Some(user) => user,
- None => err!("The specified user isn't a member of the organization"),
+ let Some(user) = UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &conn).await
+ else {
+ err!("The specified user isn't a member of the organization")
};
// In this case, when groups are requested we also need to include collections.
@@ -1133,14 +892,14 @@ async fn get_user(
user.to_json_user_details(
data.include_collections.unwrap_or(include_groups),
include_groups,
- &mut conn,
+ &conn,
)
.await,
))
}
#[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[allow(dead_code, non_snake_case)]
struct EditUserData {
Type: NumberOrString,
Collections: Option<Vec<CollectionData>>,
@@ -1173,20 +932,19 @@ async fn edit_user(
org_user_id: &str,
data: JsonUpcase<EditUserData>,
headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
let data: EditUserData = data.into_inner().data;
- let new_type = match UserOrgType::from_str(&data.Type.into_string()) {
- Some(new_type) => new_type,
- None => err!("Invalid type"),
+ let Some(new_type) = UserOrgType::from_str(&data.Type.into_string()) else {
+ err!("Invalid type")
};
- let mut user_to_edit =
- match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &mut conn).await {
- Some(user) => user,
- None => err!("The specified user isn't member of the organization"),
- };
+ let Some(mut user_to_edit) =
+ UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &conn).await
+ else {
+ err!("The specified user isn't member of the organization")
+ };
if new_type != user_to_edit.atype
&& (user_to_edit.atype >= UserOrgType::Admin || new_type >= UserOrgType::Admin)
@@ -1204,7 +962,7 @@ async fn edit_user(
&& user_to_edit.status == UserOrgStatus::Confirmed as i32
{
// Removing owner permission, check that there is at least one other confirmed owner
- if UserOrganization::count_confirmed_by_org_and_type(org_id, UserOrgType::Owner, &mut conn)
+ if UserOrganization::count_confirmed_by_org_and_type(org_id, UserOrgType::Owner, &conn)
.await
<= 1
{
@@ -1215,8 +973,8 @@ async fn edit_user(
// This check is also done at accept_invite(), _confirm_invite, _activate_user(), edit_user(), admin::update_user_org_type
// It returns different error messages per function.
if new_type < UserOrgType::Admin {
- match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, org_id, true, &mut conn).await {
- Ok(_) => {}
+ match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, org_id, true, &conn).await {
+ Ok(()) => {}
Err(OrgPolicyErr::TwoFactorMissing) => {
err!("You cannot modify this user to this type because it has no two-step login method activated");
}
@@ -1230,20 +988,17 @@ async fn edit_user(
user_to_edit.atype = new_type as i32;
// Delete all the odd collections
- for c in CollectionUser::find_by_organization_and_user_uuid(
- org_id,
- &user_to_edit.user_uuid,
- &mut conn,
- )
- .await
+ for c in
+ CollectionUser::find_by_organization_and_user_uuid(org_id, &user_to_edit.user_uuid, &conn)
+ .await
{
- c.delete(&mut conn).await?;
+ c.delete(&conn).await?;
}
// If no accessAll, add the collections received
if !data.AccessAll {
for col in data.Collections.iter().flatten() {
- match Collection::find_by_uuid_and_org(&col.Id, org_id, &mut conn).await {
+ match Collection::find_by_uuid_and_org(&col.Id, org_id, &conn).await {
None => err!("Collection not found in Organization"),
Some(collection) => {
CollectionUser::save(
@@ -1251,21 +1006,14 @@ async fn edit_user(
&collection.uuid,
col.ReadOnly,
col.HidePasswords,
- &mut conn,
+ &conn,
)
.await?;
}
}
}
}
-
- GroupUser::delete_all_by_user(&user_to_edit.uuid, &mut conn).await?;
-
- for group in data.Groups.iter().flatten() {
- let mut group_entry = GroupUser::new(String::from(group), user_to_edit.uuid.clone());
- group_entry.save(&mut conn).await?;
- }
- user_to_edit.save(&mut conn).await
+ user_to_edit.save(&conn).await
}
#[delete("/organizations/<org_id>/users", data = "<data>")]
@@ -1273,15 +1021,15 @@ async fn bulk_delete_user(
org_id: &str,
data: JsonUpcase<OrgBulkIds>,
headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> Json<Value> {
let data: OrgBulkIds = data.into_inner().data;
let mut bulk_response = Vec::new();
for org_user_id in data.Ids {
- let err_msg = match _delete_user(org_id, &org_user_id, &headers, &mut conn, &nt).await {
- Ok(_) => String::new(),
+ let err_msg = match _delete_user(org_id, &org_user_id, &headers, &conn, &nt).await {
+ Ok(()) => String::new(),
Err(e) => format!("{e:?}"),
};
@@ -1291,7 +1039,7 @@ async fn bulk_delete_user(
"Id": org_user_id,
"Error": err_msg
}
- ))
+ ));
}
Json(json!({
@@ -1306,10 +1054,10 @@ async fn delete_user(
org_id: &str,
org_user_id: &str,
headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
- _delete_user(org_id, org_user_id, &headers, &mut conn, &nt).await
+ _delete_user(org_id, org_user_id, &headers, &conn, &nt).await
}
#[post("/organizations/<org_id>/users/<org_user_id>/delete")]
@@ -1317,24 +1065,24 @@ async fn post_delete_user(
org_id: &str,
org_user_id: &str,
headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
- _delete_user(org_id, org_user_id, &headers, &mut conn, &nt).await
+ _delete_user(org_id, org_user_id, &headers, &conn, &nt).await
}
async fn _delete_user(
org_id: &str,
org_user_id: &str,
headers: &AdminHeaders,
- conn: &mut DbConn,
+ conn: &DbConn,
nt: &Notify<'_>,
) -> EmptyResult {
- let user_to_delete =
- match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
- Some(user) => user,
- None => err!("User to delete isn't member of the organization"),
- };
+ let Some(user_to_delete) =
+ UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await
+ else {
+ err!("User to delete isn't member of the organization")
+ };
if user_to_delete.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
err!("Only Owners can delete Admins or Owners")
@@ -1362,7 +1110,7 @@ async fn bulk_public_keys(
org_id: &str,
data: JsonUpcase<OrgBulkIds>,
_headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> Json<Value> {
let data: OrgBulkIds = data.into_inner().data;
@@ -1371,19 +1119,23 @@ async fn bulk_public_keys(
// If the user does not exist, just ignore it, and do not return any information regarding that UserOrg UUID.
// The web-vault will then ignore that user for the following steps.
for user_org_id in data.Ids {
- match UserOrganization::find_by_uuid_and_org(&user_org_id, org_id, &mut conn).await {
- Some(user_org) => match User::find_by_uuid(&user_org.user_uuid, &mut conn).await {
- Some(user) => bulk_response.push(json!(
+ if let Some(user_org) =
+ UserOrganization::find_by_uuid_and_org(&user_org_id, org_id, &conn).await
+ {
+ if let Some(user) = User::find_by_uuid(&user_org.user_uuid, &conn).await {
+ bulk_response.push(json!(
{
"Object": "organizationUserPublicKeyResponseModel",
"Id": user_org_id,
"UserId": user.uuid,
"Key": user.public_key
}
- )),
- None => debug!("User doesn't exist"),
- },
- None => debug!("UserOrg doesn't exist"),
+ ));
+ } else {
+ debug!("User doesn't exist");
+ }
+ } else {
+ debug!("UserOrg doesn't exist");
}
}
@@ -1419,7 +1171,7 @@ async fn post_org_import(
query: OrgIdData,
data: JsonUpcase<ImportData>,
headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
nt: Notify<'_>,
) -> EmptyResult {
let data: ImportData = data.into_inner().data;
@@ -1434,7 +1186,7 @@ async fn post_org_import(
let mut collections = Vec::new();
for coll in data.Collections {
let collection = Collection::new(org_id.clone(), coll.Name, coll.ExternalId);
- if collection.save(&mut conn).await.is_err() {
+ if collection.save(&conn).await.is_err() {
collections.push(Err(Error::new(
"Failed to create Collection",
"Failed to create Collection",
@@ -1460,7 +1212,7 @@ async fn post_org_import(
cipher_data,
&headers,
false,
- &mut conn,
+ &conn,
&nt,
UpdateType::None,
)
@@ -1478,16 +1230,16 @@ async fn post_org_import(
Err(_) => err!("Failed to assign to collection"),
};
- CollectionCipher::save(cipher_id, coll_id, &mut conn).await?;
+ CollectionCipher::save(cipher_id, coll_id, &conn).await?;
}
let mut user = headers.user;
- user.update_revision(&mut conn).await
+ user.update_revision(&conn).await
}
#[get("/organizations/<org_id>/policies")]
-async fn list_policies(org_id: &str, _headers: AdminHeaders, mut conn: DbConn) -> Json<Value> {
- let policies = OrgPolicy::find_by_org(org_id, &mut conn).await;
+async fn list_policies(org_id: &str, _headers: AdminHeaders, conn: DbConn) -> Json<Value> {
+ let policies = OrgPolicy::find_by_org(org_id, &conn).await;
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
Json(json!({
@@ -1498,12 +1250,10 @@ async fn list_policies(org_id: &str, _headers: AdminHeaders, mut conn: DbConn) -
}
#[get("/organizations/<org_id>/policies/token?<token>")]
-async fn list_policies_token(org_id: &str, token: &str, mut conn: DbConn) -> JsonResult {
+async fn list_policies_token(org_id: &str, token: &str, conn: DbConn) -> JsonResult {
let invite = crate::auth::decode_invite(token)?;
-
- let invite_org_id = match invite.org_id {
- Some(invite_org_id) => invite_org_id,
- None => err!("Invalid token"),
+ let Some(invite_org_id) = invite.org_id else {
+ err!("Invalid token")
};
if invite_org_id != org_id {
@@ -1511,7 +1261,7 @@ async fn list_policies_token(org_id: &str, token: &str, mut conn: DbConn) -> Jso
}
// TODO: We receive the invite token as ?token=<>, validate it contains the org id
- let policies = OrgPolicy::find_by_org(org_id, &mut conn).await;
+ let policies = OrgPolicy::find_by_org(org_id, &conn).await;
let policies_json: Vec<Value> = policies.iter().map(OrgPolicy::to_json).collect();
Ok(Json(json!({
@@ -1526,18 +1276,15 @@ async fn get_policy(
org_id: &str,
pol_type: i32,
_headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
- let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
- Some(pt) => pt,
- None => err!("Invalid or unsupported policy type"),
- };
-
- let policy = match OrgPolicy::find_by_org_and_type(org_id, pol_type_enum, &mut conn).await {
- Some(p) => p,
- None => OrgPolicy::new(String::from(org_id), pol_type_enum, "null".to_string()),
+ let Some(pol_type_enum) = OrgPolicyType::from_i32(pol_type) else {
+ err!("Invalid or unsupported policy type")
};
-
+ let policy = (OrgPolicy::find_by_org_and_type(org_id, pol_type_enum, &conn).await).map_or_else(
+ || OrgPolicy::new(String::from(org_id), pol_type_enum, "null".to_string()),
+ core::convert::identity,
+ );
Ok(Json(policy.to_json()))
}
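
map_or_else with core::convert::identity (or the |p| p closure used later in put_policy) is just a lint-friendly spelling of Option::unwrap_or_else; a minimal equivalence sketch:

// Sketch: both forms produce the found value or the fallback; the
// identity spelling avoids clippy's complaint about a redundant closure.
fn fallback() -> String {
    "null".to_owned()
}

fn demo(found: Option<String>) {
    let a = found.clone().map_or_else(fallback, core::convert::identity);
    let b = found.unwrap_or_else(fallback);
    assert_eq!(a, b);
}
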
@@ -1555,22 +1302,18 @@ async fn put_policy(
pol_type: i32,
data: Json<PolicyData>,
_headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: PolicyData = data.into_inner();
- let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
- Some(pt) => pt,
- None => err!("Invalid or unsupported policy type"),
+ let Some(pol_type_enum) = OrgPolicyType::from_i32(pol_type) else {
+ err!("Invalid or unsupported policy type")
};
// When enabling the TwoFactorAuthentication policy, remove this org's members that do not have 2FA
if pol_type_enum == OrgPolicyType::TwoFactorAuthentication && data.enabled {
- for member in UserOrganization::find_by_org(org_id, &mut conn)
- .await
- .into_iter()
- {
- let user_twofactor_disabled = TwoFactor::find_by_user(&member.user_uuid, &mut conn)
+ for member in UserOrganization::find_by_org(org_id, &conn).await {
+ let user_twofactor_disabled = TwoFactor::find_by_user(&member.user_uuid, &conn)
.await
.is_empty();
@@ -1580,43 +1323,36 @@ async fn put_policy(
&& member.atype < UserOrgType::Admin
&& member.status != UserOrgStatus::Invited as i32
{
- member.delete(&mut conn).await?;
+ member.delete(&conn).await?;
}
}
}
// When enabling the SingleOrg policy, remove this org's members that are members of other orgs
if pol_type_enum == OrgPolicyType::SingleOrg && data.enabled {
- for member in UserOrganization::find_by_org(org_id, &mut conn)
- .await
- .into_iter()
- {
+ for member in UserOrganization::find_by_org(org_id, &conn).await {
// Policy only applies to non-Owner/non-Admin members who have accepted joining the org
// Exclude invited and revoked users when checking for this policy.
// Those users will not be allowed to accept or be activated because of the policy checks done there.
// We check if the count is larger than 1 because it also includes this organization.
if member.atype < UserOrgType::Admin
&& member.status != UserOrgStatus::Invited as i32
- && UserOrganization::count_accepted_and_confirmed_by_user(
- &member.user_uuid,
- &mut conn,
- )
- .await
+ && UserOrganization::count_accepted_and_confirmed_by_user(&member.user_uuid, &conn)
+ .await
> 1
{
- member.delete(&mut conn).await?;
+ member.delete(&conn).await?;
}
}
}
-
- let mut policy = match OrgPolicy::find_by_org_and_type(org_id, pol_type_enum, &mut conn).await {
- Some(p) => p,
- None => OrgPolicy::new(String::from(org_id), pol_type_enum, "{}".to_string()),
- };
-
+ let mut policy = (OrgPolicy::find_by_org_and_type(org_id, pol_type_enum, &conn).await)
+ .map_or_else(
+ || OrgPolicy::new(String::from(org_id), pol_type_enum, "{}".to_string()),
+ |p| p,
+ );
policy.enabled = data.enabled;
policy.data = serde_json::to_string(&data.data)?;
- policy.save(&mut conn).await?;
+ policy.save(&conn).await?;
Ok(Json(policy.to_json()))
}
@@ -1708,7 +1444,7 @@ async fn import(
org_id: &str,
data: JsonUpcase<OrgImportData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
let data = data.into_inner().data;
@@ -1718,7 +1454,7 @@ async fn import(
// as opposed to upstream which only removes auto-imported users.
// User needs to be admin or owner to use the Directory Connector
- match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await {
+ match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &conn).await {
Some(user_org) if user_org.atype >= UserOrgType::Admin => { /* Okay, nothing to do */ }
Some(_) => err!("User has insufficient permissions to use Directory Connector"),
None => err!("User not part of organization"),
@@ -1728,17 +1464,17 @@ async fn import(
if user_data.Deleted {
// If user is marked for deletion and it exists, delete it
if let Some(user_org) =
- UserOrganization::find_by_email_and_org(&user_data.Email, org_id, &mut conn).await
+ UserOrganization::find_by_email_and_org(&user_data.Email, org_id, &conn).await
{
- user_org.delete(&mut conn).await?;
+ user_org.delete(&conn).await?;
}
// If user is not part of the organization, but it exists
- } else if UserOrganization::find_by_email_and_org(&user_data.Email, org_id, &mut conn)
+ } else if UserOrganization::find_by_email_and_org(&user_data.Email, org_id, &conn)
.await
.is_none()
{
- if let Some(user) = User::find_by_mail(&user_data.Email, &mut conn).await {
+ if let Some(user) = User::find_by_mail(&user_data.Email, &conn).await {
let user_org_status = UserOrgStatus::Accepted as i32;
let mut new_org_user =
@@ -1746,21 +1482,21 @@ async fn import(
new_org_user.access_all = false;
new_org_user.atype = UserOrgType::User as i32;
new_org_user.status = user_org_status;
- new_org_user.save(&mut conn).await?;
+ new_org_user.save(&conn).await?;
}
}
}
// If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
if data.OverwriteExisting {
for user_org in
- UserOrganization::find_by_org_and_type(org_id, UserOrgType::User, &mut conn).await
+ UserOrganization::find_by_org_and_type(org_id, UserOrgType::User, &conn).await
{
- if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &mut conn)
+ if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &conn)
.await
.map(|u| u.email)
{
if !data.Users.iter().any(|u| u.Email == user_email) {
- user_org.delete(&mut conn).await?;
+ user_org.delete(&conn).await?;
}
}
}
@@ -1775,9 +1511,9 @@ async fn deactivate_organization_user(
org_id: &str,
org_user_id: &str,
headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
- _revoke_organization_user(org_id, org_user_id, &headers, &mut conn).await
+ _revoke_organization_user(org_id, org_user_id, &headers, &conn).await
}
// Pre web-vault v2022.9.x endpoint
@@ -1796,9 +1532,9 @@ async fn revoke_organization_user(
org_id: &str,
org_user_id: &str,
headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
- _revoke_organization_user(org_id, org_user_id, &headers, &mut conn).await
+ _revoke_organization_user(org_id, org_user_id, &headers, &conn).await
}
#[put("/organizations/<org_id>/users/revoke", data = "<data>")]
@@ -1806,7 +1542,7 @@ async fn bulk_revoke_organization_user(
org_id: &str,
data: JsonUpcase<Value>,
headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> Json<Value> {
let data = data.into_inner().data;
@@ -1816,9 +1552,8 @@ async fn bulk_revoke_organization_user(
for org_user_id in org_users {
let org_user_id = org_user_id.as_str().unwrap_or_default();
let err_msg =
- match _revoke_organization_user(org_id, org_user_id, &headers, &mut conn).await
- {
- Ok(_) => String::new(),
+ match _revoke_organization_user(org_id, org_user_id, &headers, &conn).await {
+ Ok(()) => String::new(),
Err(e) => format!("{e:?}"),
};
@@ -1845,7 +1580,7 @@ async fn _revoke_organization_user(
org_id: &str,
org_user_id: &str,
headers: &AdminHeaders,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> EmptyResult {
match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
Some(mut user_org) if user_org.status > UserOrgStatus::Revoked as i32 => {
@@ -1881,9 +1616,9 @@ async fn activate_organization_user(
org_id: &str,
org_user_id: &str,
headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
- _restore_organization_user(org_id, org_user_id, &headers, &mut conn).await
+ _restore_organization_user(org_id, org_user_id, &headers, &conn).await
}
// Pre web-vault v2022.9.x endpoint
@@ -1902,9 +1637,9 @@ async fn restore_organization_user(
org_id: &str,
org_user_id: &str,
headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
- _restore_organization_user(org_id, org_user_id, &headers, &mut conn).await
+ _restore_organization_user(org_id, org_user_id, &headers, &conn).await
}
#[put("/organizations/<org_id>/users/restore", data = "<data>")]
@@ -1912,7 +1647,7 @@ async fn bulk_restore_organization_user(
org_id: &str,
data: JsonUpcase<Value>,
headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> Json<Value> {
let data = data.into_inner().data;
@@ -1921,17 +1656,11 @@ async fn bulk_restore_organization_user(
Some(org_users) => {
for org_user_id in org_users {
let org_user_id = org_user_id.as_str().unwrap_or_default();
- let err_msg = match _restore_organization_user(
- org_id,
- org_user_id,
- &headers,
- &mut conn,
- )
- .await
- {
- Ok(_) => String::new(),
- Err(e) => format!("{e:?}"),
- };
+ let err_msg =
+ match _restore_organization_user(org_id, org_user_id, &headers, &conn).await {
+ Ok(()) => String::new(),
+ Err(e) => format!("{e:?}"),
+ };
bulk_response.push(json!(
{
@@ -1956,7 +1685,7 @@ async fn _restore_organization_user(
org_id: &str,
org_user_id: &str,
headers: &AdminHeaders,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> EmptyResult {
match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
Some(mut user_org) if user_org.status < UserOrgStatus::Accepted as i32 => {
@@ -1971,7 +1700,7 @@ async fn _restore_organization_user(
// It returns different error messages per function.
if user_org.atype < UserOrgType::Admin {
match OrgPolicy::is_user_allowed(&user_org.user_uuid, org_id, false, conn).await {
- Ok(_) => {}
+ Ok(()) => {}
Err(OrgPolicyErr::TwoFactorMissing) => {
err!("You cannot restore this user because it has no two-step login method activated");
}
@@ -1990,12 +1719,11 @@ async fn _restore_organization_user(
Ok(())
}
-#[allow(unused_variables)]
+#[allow(unused_variables, clippy::unnecessary_wraps)]
#[get("/organizations/<org_id>/groups")]
fn get_groups(org_id: &str, _headers: ManagerHeadersLoose, _conn: DbConn) -> JsonResult {
- let groups: Vec<Value> = Vec::new();
Ok(Json(json!({
- "Data": groups,
+ "Data": Vec::<Value>::new(),
"Object": "list",
"ContinuationToken": null,
})))
@@ -2013,17 +1741,15 @@ struct SelectionReadOnly {
}
impl SelectionReadOnly {
- pub fn to_collection_user_details_read_only(
- collection_user: &CollectionUser,
- ) -> SelectionReadOnly {
- SelectionReadOnly {
+ fn to_collection_user_details_read_only(collection_user: &CollectionUser) -> Self {
+ Self {
Id: collection_user.user_uuid.clone(),
ReadOnly: collection_user.read_only,
HidePasswords: collection_user.hide_passwords,
}
}
- pub fn to_json(&self) -> Value {
+ fn to_json(&self) -> Value {
json!(self)
}
}
@@ -2078,21 +1804,16 @@ fn post_delete_group(
org_id: &str,
group_id: &str,
headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> EmptyResult {
- _delete_group(org_id, group_id, &headers, &mut conn)
+ _delete_group(org_id, group_id, &headers, &conn)
}
#[delete("/organizations/<org_id>/groups/<group_id>")]
-fn delete_group(
- org_id: &str,
- group_id: &str,
- headers: AdminHeaders,
- mut conn: DbConn,
-) -> EmptyResult {
- _delete_group(org_id, group_id, &headers, &mut conn)
+fn delete_group(org_id: &str, group_id: &str, headers: AdminHeaders, conn: DbConn) -> EmptyResult {
+ _delete_group(org_id, group_id, &headers, &conn)
}
-fn _delete_group(_: &str, _: &str, _: &AdminHeaders, _: &mut DbConn) -> EmptyResult {
+fn _delete_group(_: &str, _: &str, _: &AdminHeaders, _: &DbConn) -> EmptyResult {
err!("Group support is disabled");
}
@@ -2211,10 +1932,9 @@ struct OrganizationUserResetPasswordRequest {
}
#[get("/organizations/<org_id>/keys")]
-async fn get_organization_keys(org_id: &str, mut conn: DbConn) -> JsonResult {
- let org = match Organization::find_by_uuid(org_id, &mut conn).await {
- Some(organization) => organization,
- None => err!("Organization not found"),
+async fn get_organization_keys(org_id: &str, conn: DbConn) -> JsonResult {
+ let Some(org) = Organization::find_by_uuid(org_id, &conn).await else {
+ err!("Organization not found")
};
Ok(Json(json!({
@@ -2234,68 +1954,48 @@ async fn put_reset_password(
org_user_id: &str,
_headers: AdminHeaders,
data: JsonUpcase<OrganizationUserResetPasswordRequest>,
- mut conn: DbConn,
+ conn: DbConn,
_nt: Notify<'_>,
) -> EmptyResult {
- let org = match Organization::find_by_uuid(org_id, &mut conn).await {
- Some(org) => org,
- None => err!("Required organization not found"),
+ let Some(org) = Organization::find_by_uuid(org_id, &conn).await else {
+ err!("Required organization not found")
};
- let org_user =
- match UserOrganization::find_by_uuid_and_org(org_user_id, &org.uuid, &mut conn).await {
- Some(user) => user,
- None => err!("User to reset isn't member of required organization"),
- };
- match User::find_by_uuid(&org_user.user_uuid, &mut conn).await {
+ let Some(org_user) =
+ UserOrganization::find_by_uuid_and_org(org_user_id, &org.uuid, &conn).await
+ else {
+ err!("User to reset isn't member of required organization")
+ };
+ match User::find_by_uuid(&org_user.user_uuid, &conn).await {
Some(_) => err!("Password reset is not supported on an email-disabled instance."),
None => err!("User not found"),
}
}
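
Most of the match-to-let-else rewrites in this patch have the same shape: the failure arm must diverge (return, err!, continue), which lets the success binding stay on the happy path without extra nesting. A generic sketch of the transformation:

// Before: a match whose only purpose is unwrapping the Some arm.
fn before(opt: Option<u32>) -> Result<u32, &'static str> {
    let value = match opt {
        Some(v) => v,
        None => return Err("not found"),
    };
    Ok(value + 1)
}

// After: let-else keeps the success path flat; the else block must diverge.
fn after(opt: Option<u32>) -> Result<u32, &'static str> {
    let Some(value) = opt else {
        return Err("not found");
    };
    Ok(value + 1)
}
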
+#[allow(unused_variables)]
#[get("/organizations/<org_id>/users/<org_user_id>/reset-password-details")]
-async fn get_reset_password_details(
+fn get_reset_password_details(
org_id: &str,
org_user_id: &str,
_headers: AdminHeaders,
- mut conn: DbConn,
+ _conn: DbConn,
) -> JsonResult {
- match Organization::find_by_uuid(org_id, &mut conn).await {
- Some(_) => {
- let org_user = match UserOrganization::find_by_uuid_and_org(
- org_user_id,
- org_id,
- &mut conn,
- )
- .await
- {
- Some(user) => user,
- None => err!("User to reset isn't member of required organization"),
- };
- match User::find_by_uuid(&org_user.user_uuid, &mut conn).await {
- Some(_) => err!("Password reset is not supported on an email-disabled instance."),
- None => err!("User not found"),
- }
- }
- None => err!("Required organization not found"),
- }
+ err!("Password reset is not supported on an email-disabled instance.")
}
+
#[allow(unused_variables)]
#[put(
"/organizations/<org_id>/users/<org_user_id>/reset-password-enrollment",
data = "<data>"
)]
-async fn put_reset_password_enrollment(
+fn put_reset_password_enrollment(
org_id: &str,
org_user_id: &str,
- headers: Headers,
+ _headers: Headers,
data: JsonUpcase<OrganizationUserResetPasswordEnrollmentRequest>,
- mut conn: DbConn,
+ _conn: DbConn,
) -> EmptyResult {
- match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await {
- Some(_) => err!("Password reset is not supported on an email-disabled instance."),
- None => err!("User to enroll isn't member of required organization"),
- }
+ err!("Password reset is not supported on an email-disabled instance.")
}
// This is a new function, used by the v2022.9.x and newer clients.
@@ -2306,7 +2006,7 @@ async fn put_reset_password_enrollment(
// We need to convert all keys so that the first character is lowercase.
// Otherwise the export will be just an empty JSON file.
#[get("/organizations/<org_id>/export")]
-async fn get_org_export(org_id: &str, headers: AdminHeaders, mut conn: DbConn) -> Json<Value> {
+async fn get_org_export(org_id: &str, headers: AdminHeaders, conn: DbConn) -> Json<Value> {
use semver::{Version, VersionReq};
// Since version v2023.1.0 the format of the export is different.
@@ -2314,25 +2014,23 @@ async fn get_org_export(org_id: &str, headers: AdminHeaders, mut conn: DbConn) -
// Therefore, we will check for any version smaller than v2023.1.0 and return a different response.
// If we can't determine the version, we will default to the latest format (v2023.1.0 and higher).
// https://github.com/bitwarden/server/blob/9ca93381ce416454734418c3a9f99ab49747f1b6/src/Api/Controllers/OrganizationExportController.cs#L44
- let use_list_response_model = if let Some(client_version) = headers.client_version {
+ let use_list_response_model = headers.client_version.map_or(false, |client_version| {
let ver_match = VersionReq::parse("<2023.1.0").unwrap();
let client_version = Version::parse(&client_version).unwrap();
ver_match.matches(&client_version)
- } else {
- false
- };
+ });
// Also both main keys here need to be lowercase, else the export will fail.
if use_list_response_model {
// Backwards compatible pre v2023.1.0 response
Json(json!({
"collections": {
- "data": convert_json_key_lcase_first(_get_org_collections(org_id, &mut conn).await),
+ "data": convert_json_key_lcase_first(_get_org_collections(org_id, &conn).await),
"object": "list",
"continuationToken": null,
},
"ciphers": {
- "data": convert_json_key_lcase_first(_get_org_details(org_id, &headers.host, &headers.user.uuid, &mut conn).await),
+ "data": convert_json_key_lcase_first(_get_org_details(org_id, &headers.user.uuid, &conn).await),
"object": "list",
"continuationToken": null,
}
@@ -2340,8 +2038,8 @@ async fn get_org_export(org_id: &str, headers: AdminHeaders, mut conn: DbConn) -
} else {
// v2023.1.0 and newer response
Json(json!({
- "collections": convert_json_key_lcase_first(_get_org_collections(org_id, &mut conn).await),
- "ciphers": convert_json_key_lcase_first(_get_org_details(org_id, &headers.host, &headers.user.uuid, &mut conn).await),
+ "collections": convert_json_key_lcase_first(_get_org_collections(org_id, &conn).await),
+ "ciphers": convert_json_key_lcase_first(_get_org_details(org_id, &headers.user.uuid, &conn).await),
}))
}
}
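
The version gate above uses semver requirement matching; note that the two unwrap() calls in the patch will panic if a client reports an unparsable version string. A standalone sketch of the same check that instead falls back to the current format:

use semver::{Version, VersionReq};

// Sketch: return true when the reported client version predates v2023.1.0,
// treating a missing or unparsable version as "current format".
fn use_list_response_model(client_version: Option<&str>) -> bool {
    let ver_match = VersionReq::parse("<2023.1.0").expect("static requirement is valid");
    client_version
        .and_then(|v| Version::parse(v).ok())
        .map_or(false, |v| ver_match.matches(&v))
}
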
@@ -2351,16 +2049,16 @@ async fn _api_key(
data: JsonUpcase<PasswordOrOtpData>,
rotate: bool,
headers: AdminHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
// Validate the admin users password/otp
- data.validate(&user, true, &mut conn).await?;
+ data.validate(&user, true, &conn).await?;
- let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_id, &conn).await {
- Some(mut org_api_key) => {
+ let org_api_key =
+ if let Some(mut org_api_key) = OrganizationApiKey::find_by_org_uuid(org_id, &conn).await {
if rotate {
org_api_key.api_key = crate::crypto::generate_api_key();
org_api_key.revision_date = chrono::Utc::now().naive_utc();
@@ -2370,8 +2068,7 @@ async fn _api_key(
.expect("Error rotating organization API Key");
}
org_api_key
- }
- None => {
+ } else {
let api_key = crate::crypto::generate_api_key();
let new_org_api_key = OrganizationApiKey::new(String::from(org_id), api_key);
new_org_api_key
@@ -2379,8 +2076,7 @@ async fn _api_key(
.await
.expect("Error creating organization API Key");
new_org_api_key
- }
- };
+ };
Ok(Json(json!({
"ApiKey": org_api_key.api_key,
diff --git a/src/api/core/public.rs b/src/api/core/public.rs
@@ -1,17 +1,14 @@
+use crate::{
+ api::{EmptyResult, JsonUpcase},
+ auth,
+ db::{models::OrganizationApiKey, DbConn},
+};
use chrono::Utc;
use rocket::{
request::{self, FromRequest, Outcome},
Request, Route,
};
-use std::collections::HashSet;
-
-use crate::{
- api::{EmptyResult, JsonUpcase},
- auth,
- db::{models::*, DbConn},
-};
-
pub fn routes() -> Vec<Route> {
routes![ldap_import]
}
@@ -21,7 +18,7 @@ pub fn routes() -> Vec<Route> {
struct OrgImportGroupData;
#[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[allow(dead_code, non_snake_case)]
struct OrgImportUserData {
Email: String,
ExternalId: String,
@@ -29,123 +26,20 @@ struct OrgImportUserData {
}
#[derive(Deserialize)]
-#[allow(non_snake_case)]
+#[allow(dead_code, non_snake_case)]
struct OrgImportData {
Members: Vec<OrgImportUserData>,
OverwriteExisting: bool,
// LargeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more than 2000 users or groups without the flag set.
}
+#[allow(unused_variables)]
#[post("/public/organization/import", data = "<data>")]
-async fn ldap_import(
- data: JsonUpcase<OrgImportData>,
- token: PublicToken,
- mut conn: DbConn,
-) -> EmptyResult {
- // Most of the logic for this function can be found here
- // https://github.com/bitwarden/server/blob/fd892b2ff4547648a276734fb2b14a8abae2c6f5/src/Core/Services/Implementations/OrganizationService.cs#L1797
-
- let org_id = token.0;
- let data = data.into_inner().data;
-
- for user_data in &data.Members {
- if user_data.Deleted {
- // If user is marked for deletion and it exists, revoke it
- if let Some(mut user_org) =
- UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
- {
- // Only revoke a user if it is not the last confirmed owner
- let revoked = if user_org.atype == UserOrgType::Owner
- && user_org.status == UserOrgStatus::Confirmed as i32
- {
- if UserOrganization::count_confirmed_by_org_and_type(
- &org_id,
- UserOrgType::Owner,
- &mut conn,
- )
- .await
- <= 1
- {
- warn!("Can't revoke the last owner");
- false
- } else {
- user_org.revoke()
- }
- } else {
- user_org.revoke()
- };
-
- let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
- if revoked || ext_modified {
- user_org.save(&mut conn).await?;
- }
- }
- // If user is part of the organization, restore it
- } else if let Some(mut user_org) =
- UserOrganization::find_by_email_and_org(&user_data.Email, &org_id, &mut conn).await
- {
- let restored = user_org.restore();
- let ext_modified = user_org.set_external_id(Some(user_data.ExternalId.clone()));
- if restored || ext_modified {
- user_org.save(&mut conn).await?;
- }
- } else {
- // If user is not part of the organization
- let user = match User::find_by_mail(&user_data.Email, &mut conn).await {
- Some(user) => user, // exists in vaultwarden
- None => {
- // User does not exist yet
- let mut new_user = User::new(user_data.Email.clone());
- new_user.save(&mut conn).await?;
- let invitation = Invitation::new(&new_user.email);
- invitation.save(&mut conn).await?;
- new_user
- }
- };
- let user_org_status = UserOrgStatus::Accepted as i32;
- let mut new_org_user = UserOrganization::new(user.uuid.clone(), org_id.clone());
- new_org_user.set_external_id(Some(user_data.ExternalId.clone()));
- new_org_user.access_all = false;
- new_org_user.atype = UserOrgType::User as i32;
- new_org_user.status = user_org_status;
- new_org_user.save(&mut conn).await?;
- }
- }
- warn!("Group support is disabled, groups will not be imported!");
- // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true)
- if data.OverwriteExisting {
- // Generate a HashSet to quickly verify if a member is listed or not.
- let sync_members: HashSet<String> =
- data.Members.into_iter().map(|m| m.ExternalId).collect();
- for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await {
- if let Some(ref user_external_id) = user_org.external_id {
- if !sync_members.contains(user_external_id) {
- if user_org.atype == UserOrgType::Owner
- && user_org.status == UserOrgStatus::Confirmed as i32
- {
- // Removing owner, check that there is at least one other confirmed owner
- if UserOrganization::count_confirmed_by_org_and_type(
- &org_id,
- UserOrgType::Owner,
- &mut conn,
- )
- .await
- <= 1
- {
- warn!("Can't delete the last owner");
- continue;
- }
- }
- user_org.delete(&mut conn).await?;
- }
- }
- }
- }
-
- Ok(())
+fn ldap_import(data: JsonUpcase<OrgImportData>, _token: PublicToken, _conn: DbConn) -> EmptyResult {
+ err!("LDAP import is permanently disabled.")
}
-pub struct PublicToken(String);
+struct PublicToken(String);
#[rocket::async_trait]
impl<'r> FromRequest<'r> for PublicToken {
@@ -162,9 +56,8 @@ impl<'r> FromRequest<'r> for PublicToken {
None => err_handler!("No access token provided"),
};
// Check JWT token is valid and get device and user from it
- let claims = match auth::decode_api_org(access_token) {
- Ok(claims) => claims,
- Err(_) => err_handler!("Invalid claim"),
+ let Ok(claims) = auth::decode_api_org(access_token) else {
+ err_handler!("Invalid claim")
};
// Check if time is between claims.nbf and claims.exp
let time_now = Utc::now().naive_utc().timestamp();
@@ -175,28 +68,26 @@ impl<'r> FromRequest<'r> for PublicToken {
err_handler!("Token expired");
}
// Check if claims.iss is host|claims.scope[0]
- let host = match auth::Host::from_request(request).await {
- Outcome::Success(host) => host,
- _ => err_handler!("Error getting Host"),
+ let Outcome::Success(host) = auth::Host::from_request(request).await else {
+ err_handler!("Error getting Host")
};
let complete_host = format!("{}|{}", host.host, claims.scope[0]);
if complete_host != claims.iss {
err_handler!("Token not issued by this server");
}
-
// Check if claims.sub is org_api_key.uuid
// Check if claims.client_sub is org_api_key.org_uuid
- let conn = match DbConn::from_request(request).await {
- Outcome::Success(conn) => conn,
- _ => err_handler!("Error getting DB"),
+ let Some(org_uuid) = claims.client_id.strip_prefix("organization.") else {
+ err_handler!("Malformed client_id")
};
- let org_uuid = match claims.client_id.strip_prefix("organization.") {
- Some(uuid) => uuid,
- None => err_handler!("Malformed client_id"),
- };
- let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_uuid, &conn).await {
- Some(org_api_key) => org_api_key,
- None => err_handler!("Invalid client_id"),
+ let org_api_key = match DbConn::from_request(request).await {
+ Outcome::Success(conn) => {
+ match OrganizationApiKey::find_by_org_uuid(org_uuid, &conn).await {
+ Some(org_api_key) => org_api_key,
+ None => err_handler!("Invalid client_id"),
+ }
+ }
+ _ => err_handler!("Error getting DB"),
};
if org_api_key.org_uuid != claims.client_sub {
err_handler!("Token not issued for this org");
@@ -205,6 +96,6 @@ impl<'r> FromRequest<'r> for PublicToken {
err_handler!("Token not issued for this client");
}
- Outcome::Success(PublicToken(claims.client_sub))
+ Outcome::Success(Self(claims.client_sub))
}
}
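
The request guard above deliberately performs the cheap, stateless checks (token window, issuer) before opening a database connection. A compact sketch of those two checks, assuming the same claim layout:

use chrono::Utc;

// Sketch: accept a token only while now is inside [nbf, exp].
fn token_time_valid(nbf: i64, exp: i64) -> bool {
    let now = Utc::now().naive_utc().timestamp();
    nbf <= now && now <= exp
}

// Sketch: claims.iss must equal "host|scope[0]".
fn issuer_matches(host: &str, scope0: &str, iss: &str) -> bool {
    format!("{host}|{scope0}") == iss
}
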
diff --git a/src/api/core/sends.rs b/src/api/core/sends.rs
@@ -121,8 +121,8 @@ fn post_send_file_v2_data(
#[derive(Deserialize)]
#[allow(dead_code, non_snake_case)]
-pub struct SendAccessData {
- pub Password: Option<String>,
+struct SendAccessData {
+ Password: Option<String>,
}
#[allow(unused_variables)]
diff --git a/src/api/core/two_factor/authenticator.rs b/src/api/core/two_factor/authenticator.rs
@@ -1,16 +1,15 @@
-use data_encoding::BASE32;
-use rocket::serde::json::Json;
-use rocket::Route;
-
use crate::{
api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordOrOtpData},
auth::{ClientIp, Headers},
crypto,
db::{
- models::{EventType, TwoFactor, TwoFactorType},
+ models::{TwoFactor, TwoFactorType},
DbConn,
},
};
+use data_encoding::BASE32;
+use rocket::serde::json::Json;
+use rocket::Route;
pub fn routes() -> Vec<Route> {
routes![
@@ -24,13 +23,13 @@ pub fn routes() -> Vec<Route> {
async fn generate_authenticator(
data: JsonUpcase<PasswordOrOtpData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
- data.validate(&user, false, &mut conn).await?;
+ data.validate(&user, false, &conn).await?;
let type_ = TwoFactorType::Authenticator as i32;
- let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await;
+ let twofactor = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await;
let (enabled, key) = match twofactor {
Some(tf) => (true, tf.data),
@@ -56,7 +55,7 @@ struct EnableAuthenticatorData {
async fn activate_authenticator(
data: JsonUpcase<EnableAuthenticatorData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: EnableAuthenticatorData = data.into_inner().data;
let key = data.Key;
@@ -67,7 +66,7 @@ async fn activate_authenticator(
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
}
- .validate(&user, true, &mut conn)
+ .validate(&user, true, &conn)
.await?;
// Validate key as base32 and 20 bytes length
@@ -81,14 +80,7 @@ async fn activate_authenticator(
}
// Validate the token provided with the key, and save new twofactor
- validate_totp_code(
- &user.uuid,
- &token,
- &key.to_uppercase(),
- &headers.ip,
- &mut conn,
- )
- .await?;
+ validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &headers.ip, &conn).await?;
Ok(Json(json!({
"Enabled": true,
"Key": key,
@@ -110,7 +102,7 @@ pub async fn validate_totp_code_str(
totp_code: &str,
secret: &str,
ip: &ClientIp,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> EmptyResult {
if !totp_code.chars().all(char::is_numeric) {
err!("TOTP code is not a number");
@@ -119,45 +111,51 @@ pub async fn validate_totp_code_str(
validate_totp_code(user_uuid, totp_code, secret, ip, conn).await
}
-pub async fn validate_totp_code(
+async fn validate_totp_code(
user_uuid: &str,
totp_code: &str,
secret: &str,
ip: &ClientIp,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> EmptyResult {
use totp_lite::{totp_custom, Sha1};
- let decoded_secret = match BASE32.decode(secret.as_bytes()) {
- Ok(s) => s,
- Err(_) => err!("Invalid TOTP secret"),
- };
-
- let mut twofactor = match TwoFactor::find_by_user_and_type(
- user_uuid,
- TwoFactorType::Authenticator as i32,
- conn,
- )
- .await
- {
- Some(tf) => tf,
- _ => TwoFactor::new(
- user_uuid.to_string(),
- TwoFactorType::Authenticator,
- secret.to_string(),
- ),
+ let Ok(decoded_secret) = BASE32.decode(secret.as_bytes()) else {
+ err!("Invalid TOTP secret")
};
+ let mut twofactor =
+ (TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn)
+ .await)
+ .map_or_else(
+ || {
+ TwoFactor::new(
+ user_uuid.to_string(),
+ TwoFactorType::Authenticator,
+ secret.to_string(),
+ )
+ },
+ |tf| tf,
+ );
let steps = 0;
// Get the current system time in UNIX Epoch (UTC)
let current_time = chrono::Utc::now();
let current_timestamp = current_time.timestamp();
for step in -steps..=steps {
- let time_step = current_timestamp / 30i64 + step;
-
+ let time_step = (current_timestamp / 30i64)
+ .checked_add(step)
+ .expect("overflow during addition of TOTP");
// We need to calculate the time offset and cast it as a u64,
// since we only have times in the future and the totp generator needs a u64 instead of the default i64.
- let time = (current_timestamp + step * 30i64) as u64;
+ let time = u64::try_from(
+ current_timestamp
+ .checked_add(
+ step.checked_mul(30i64)
+ .expect("overflow during multiplication in TOTP"),
+ )
+ .expect("overflow during addition in TOTP"),
+ )
+ .expect("underflow when casting to a u64 in TOTP");
let generated = totp_custom::<Sha1>(30, 6, &decoded_secret, time);
// Check that the given code equals the generated one and that the time_step is larger than the one last used.
@@ -169,7 +167,8 @@ pub async fn validate_totp_code(
// Save the last used time step so only totp time steps higher than this one are allowed.
// This will also save a newly created twofactor if the code is correct.
- twofactor.last_used = time_step as i32;
+ twofactor.last_used = i32::try_from(time_step)
+ .expect("overflow or underflow when casting to an i32 in TOTP");
twofactor.save(conn).await?;
return Ok(());
} else if generated == totp_code && time_step <= i64::from(twofactor.last_used) {
@@ -177,28 +176,18 @@ pub async fn validate_totp_code(
"This TOTP or a TOTP code within {} steps back or forward has already been used!",
steps
);
- err!(
- format!(
- "Invalid TOTP code! Server time: {} IP: {}",
- current_time.format("%F %T UTC"),
- ip.ip
- ),
- ErrorEvent {
- event: EventType::UserFailedLogIn2fa
- }
- );
+ err!(format!(
+ "Invalid TOTP code! Server time: {} IP: {}",
+ current_time.format("%F %T UTC"),
+ ip.ip
+ ));
}
}
// Else no valid code received, deny access
- err!(
- format!(
- "Invalid TOTP code! Server time: {} IP: {}",
- current_time.format("%F %T UTC"),
- ip.ip
- ),
- ErrorEvent {
- event: EventType::UserFailedLogIn2fa
- }
- );
+ err!(format!(
+ "Invalid TOTP code! Server time: {} IP: {}",
+ current_time.format("%F %T UTC"),
+ ip.ip
+ ));
}
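
Since steps is 0, the loop above examines only the current 30-second window; the checked arithmetic just makes the previous silent as casts explicit, panicking on overflow instead of wrapping. A self-contained sketch of the core TOTP comparison with totp_lite (error handling simplified to a bool):

use data_encoding::BASE32;
use totp_lite::{totp_custom, Sha1};

// Sketch: verify a 6-digit TOTP code against the current 30-second step.
fn totp_matches(secret_b32: &str, code: &str, now_unix: i64) -> bool {
    let Ok(secret) = BASE32.decode(secret_b32.as_bytes()) else {
        return false;
    };
    // Negative timestamps cannot occur for real clocks; reject them anyway.
    let Ok(time) = u64::try_from(now_unix) else {
        return false;
    };
    totp_custom::<Sha1>(30, 6, &secret, time) == code
}
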
diff --git a/src/api/core/two_factor/mod.rs b/src/api/core/two_factor/mod.rs
@@ -1,7 +1,10 @@
use crate::{
api::{JsonResult, JsonUpcase, NumberOrString, PasswordOrOtpData},
auth::{ClientHeaders, Headers},
- db::{models::*, DbConn},
+ db::{
+ models::{OrgPolicyType, TwoFactor, UserOrgType, UserOrganization},
+ DbConn,
+ },
};
pub mod authenticator;
pub mod protected_actions;
@@ -27,8 +30,8 @@ pub fn routes() -> Vec<Route> {
}
#[get("/two-factor")]
-async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json<Value> {
- let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &mut conn).await;
+async fn get_twofactor(headers: Headers, conn: DbConn) -> Json<Value> {
+ let twofactors = TwoFactor::find_by_user(&headers.user.uuid, &conn).await;
let twofactors_json: Vec<Value> = twofactors.iter().map(TwoFactor::to_json_provider).collect();
Json(json!({
@@ -42,12 +45,12 @@ async fn get_twofactor(headers: Headers, mut conn: DbConn) -> Json<Value> {
async fn get_recover(
data: JsonUpcase<PasswordOrOtpData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
- data.validate(&user, true, &mut conn).await?;
+ data.validate(&user, true, &conn).await?;
Ok(Json(json!({
"Code": user.totp_recover,
@@ -67,16 +70,13 @@ struct RecoverTwoFactor {
async fn recover(
data: JsonUpcase<RecoverTwoFactor>,
_client_headers: ClientHeaders,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: RecoverTwoFactor = data.into_inner().data;
-
use crate::db::models::User;
-
// Get the user
- let mut user = match User::find_by_mail(&data.Email, &mut conn).await {
- Some(user) => user,
- None => err!("Username or password is incorrect. Try again."),
+ let Some(mut user) = User::find_by_mail(&data.Email, &conn).await else {
+ err!("Username or password is incorrect. Try again.")
};
// Check password
@@ -90,10 +90,10 @@ async fn recover(
}
// Remove all twofactors from the user
- TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
+ TwoFactor::delete_all_by_user(&user.uuid, &conn).await?;
// Remove the recovery code, not needed without twofactors
user.totp_recover = None;
- user.save(&mut conn).await?;
+ user.save(&conn).await?;
Ok(Json(Value::Object(serde_json::Map::new())))
}
@@ -109,7 +109,7 @@ struct DisableTwoFactorData {
async fn disable_twofactor(
data: JsonUpcase<DisableTwoFactorData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: DisableTwoFactorData = data.into_inner().data;
let user = headers.user;
@@ -119,30 +119,27 @@ async fn disable_twofactor(
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
}
- .validate(&user, true, &mut conn)
+ .validate(&user, true, &conn)
.await?;
let type_ = data.Type.into_i32()?;
- if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
- twofactor.delete(&mut conn).await?;
+ if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await {
+ twofactor.delete(&conn).await?;
}
- let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn)
- .await
- .is_empty();
+ let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &conn).await.is_empty();
if twofactor_disabled {
for user_org in UserOrganization::find_by_user_and_policy(
&user.uuid,
OrgPolicyType::TwoFactorAuthentication,
- &mut conn,
+ &conn,
)
.await
- .into_iter()
{
if user_org.atype < UserOrgType::Admin {
- user_org.delete(&mut conn).await?;
+ user_org.delete(&conn).await?;
}
}
}
diff --git a/src/api/core/two_factor/protected_actions.rs b/src/api/core/two_factor/protected_actions.rs
@@ -1,6 +1,3 @@
-use chrono::{Duration, NaiveDateTime, Utc};
-use rocket::Route;
-
use crate::{
api::{EmptyResult, JsonUpcase},
auth::Headers,
@@ -11,6 +8,8 @@ use crate::{
},
error::{Error, MapResult},
};
+use chrono::{Duration, NaiveDateTime, Utc};
+use rocket::Route;
pub fn routes() -> Vec<Route> {
routes![request_otp, verify_otp]
@@ -18,17 +17,17 @@ pub fn routes() -> Vec<Route> {
/// Data stored in the TwoFactor table in the db
#[derive(Serialize, Deserialize, Debug)]
-pub struct ProtectedActionData {
+struct ProtectedActionData {
/// Token issued to validate the protected action
- pub token: String,
+ token: String,
/// UNIX timestamp of token issue.
- pub token_sent: i64,
+ token_sent: i64,
// The total amount of attempts
- pub attempts: u8,
+ attempts: u8,
}
impl ProtectedActionData {
- pub fn from_json(string: &str) -> Result<Self, Error> {
+ fn from_json(string: &str) -> Result<Self, Error> {
let res: Result<Self, crate::serde_json::Error> = serde_json::from_str(string);
match res {
Ok(x) => Ok(x),
@@ -36,7 +35,7 @@ impl ProtectedActionData {
}
}
- pub fn add_attempt(&mut self) {
+ fn add_attempt(&mut self) {
self.attempts += 1;
}
}
@@ -66,7 +65,7 @@ pub async fn validate_protected_action_otp(
otp: &str,
user_uuid: &str,
delete_if_valid: bool,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> EmptyResult {
let pa = TwoFactor::find_by_user_and_type(
user_uuid,
diff --git a/src/api/core/two_factor/webauthn.rs b/src/api/core/two_factor/webauthn.rs
@@ -1,21 +1,26 @@
-use rocket::serde::json::Json;
-use rocket::Route;
-use serde_json::Value;
-use url::Url;
-use webauthn_rs::{
- base64_data::Base64UrlSafeData, proto::*, AuthenticationState, RegistrationState, Webauthn,
-};
-
use crate::{
api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordOrOtpData},
auth::Headers,
config,
db::{
- models::{EventType, TwoFactor, TwoFactorType},
+ models::{TwoFactor, TwoFactorType},
DbConn,
},
error::Error,
};
+use rocket::serde::json::Json;
+use rocket::Route;
+use serde_json::Value;
+use url::Url;
+use webauthn_rs::{
+ base64_data::Base64UrlSafeData,
+ proto::{
+ AuthenticationExtensionsClientOutputs, AuthenticatorAssertionResponseRaw,
+ AuthenticatorAttestationResponseRaw, Credential, PublicKeyCredential,
+ RegisterPublicKeyCredential, RequestAuthenticationExtensions,
+ },
+ AuthenticationState, RegistrationState, Webauthn,
+};
pub fn routes() -> Vec<Route> {
routes![
@@ -31,22 +36,22 @@ pub fn routes() -> Vec<Route> {
// Both `struct Registration` and `struct U2FRegistration` can be removed if we remove the u2f to WebAuthn migration
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
-pub struct Registration {
- pub key_handle: Vec<u8>,
- pub pub_key: Vec<u8>,
- pub attestation_cert: Option<Vec<u8>>,
- pub device_name: Option<String>,
+struct Registration {
+ key_handle: Vec<u8>,
+ pub_key: Vec<u8>,
+ attestation_cert: Option<Vec<u8>>,
+ device_name: Option<String>,
}
#[derive(Serialize, Deserialize)]
-pub struct U2FRegistration {
- pub id: i32,
- pub name: String,
+struct U2FRegistration {
+ id: i32,
+ name: String,
#[serde(with = "Registration")]
- pub reg: Registration,
- pub counter: u32,
+ reg: Registration,
+ counter: u32,
compromised: bool,
- pub migrated: Option<bool>,
+ migrated: Option<bool>,
}
struct WebauthnConfig {
@@ -89,12 +94,11 @@ impl webauthn_rs::WebauthnConfig for WebauthnConfig {
}
#[derive(Debug, Serialize, Deserialize)]
-pub struct WebauthnRegistration {
- pub id: i32,
- pub name: String,
- pub migrated: bool,
-
- pub credential: Credential,
+struct WebauthnRegistration {
+ id: i32,
+ name: String,
+ migrated: bool,
+ credential: Credential,
}
impl WebauthnRegistration {
@@ -111,14 +115,14 @@ impl WebauthnRegistration {
async fn get_webauthn(
data: JsonUpcase<PasswordOrOtpData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
- data.validate(&user, false, &mut conn).await?;
+ data.validate(&user, false, &conn).await?;
- let (enabled, registrations) = get_webauthn_registrations(&user.uuid, &mut conn).await?;
+ let (enabled, registrations) = get_webauthn_registrations(&user.uuid, &conn).await?;
let registrations_json: Vec<Value> = registrations
.iter()
.map(WebauthnRegistration::to_json)
@@ -134,12 +138,12 @@ async fn get_webauthn(
async fn generate_webauthn_challenge(
data: JsonUpcase<PasswordOrOtpData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: PasswordOrOtpData = data.into_inner().data;
let user = headers.user;
- data.validate(&user, false, &mut conn).await?;
- let registrations = get_webauthn_registrations(&user.uuid, &mut conn)
+ data.validate(&user, false, &conn).await?;
+ let registrations = get_webauthn_registrations(&user.uuid, &conn)
.await?
.1
.into_iter()
@@ -157,7 +161,7 @@ async fn generate_webauthn_challenge(
let type_ = TwoFactorType::WebauthnRegisterChallenge;
TwoFactor::new(user.uuid, type_, serde_json::to_string(&state)?)
- .save(&mut conn)
+ .save(&conn)
.await?;
let mut challenge_value = serde_json::to_value(challenge.public_key)?;
challenge_value["status"] = "ok".into();
@@ -179,18 +183,18 @@ struct EnableWebauthnData {
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
struct RegisterPublicKeyCredentialCopy {
- pub Id: String,
- pub RawId: Base64UrlSafeData,
- pub Response: AuthenticatorAttestationResponseRawCopy,
- pub Type: String,
+ Id: String,
+ RawId: Base64UrlSafeData,
+ Response: AuthenticatorAttestationResponseRawCopy,
+ Type: String,
}
// This is copied from AuthenticatorAttestationResponseRaw to change clientDataJSON to clientDataJson
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
-pub struct AuthenticatorAttestationResponseRawCopy {
- pub AttestationObject: Base64UrlSafeData,
- pub ClientDataJson: Base64UrlSafeData,
+struct AuthenticatorAttestationResponseRawCopy {
+ AttestationObject: Base64UrlSafeData,
+ ClientDataJson: Base64UrlSafeData,
}
impl From<RegisterPublicKeyCredentialCopy> for RegisterPublicKeyCredential {
@@ -210,29 +214,29 @@ impl From<RegisterPublicKeyCredentialCopy> for RegisterPublicKeyCredential {
// This is copied from PublicKeyCredential to change the Response objects casing
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
-pub struct PublicKeyCredentialCopy {
- pub Id: String,
- pub RawId: Base64UrlSafeData,
- pub Response: AuthenticatorAssertionResponseRawCopy,
- pub Extensions: Option<AuthenticationExtensionsClientOutputsCopy>,
- pub Type: String,
+struct PublicKeyCredentialCopy {
+ Id: String,
+ RawId: Base64UrlSafeData,
+ Response: AuthenticatorAssertionResponseRawCopy,
+ Extensions: Option<AuthenticationExtensionsClientOutputsCopy>,
+ Type: String,
}
// This is copied from AuthenticatorAssertionResponseRaw to change clientDataJSON to clientDataJson
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
-pub struct AuthenticatorAssertionResponseRawCopy {
- pub AuthenticatorData: Base64UrlSafeData,
- pub ClientDataJson: Base64UrlSafeData,
- pub Signature: Base64UrlSafeData,
- pub UserHandle: Option<Base64UrlSafeData>,
+struct AuthenticatorAssertionResponseRawCopy {
+ AuthenticatorData: Base64UrlSafeData,
+ ClientDataJson: Base64UrlSafeData,
+ Signature: Base64UrlSafeData,
+ UserHandle: Option<Base64UrlSafeData>,
}
#[derive(Debug, Deserialize)]
#[allow(non_snake_case)]
-pub struct AuthenticationExtensionsClientOutputsCopy {
+struct AuthenticationExtensionsClientOutputsCopy {
#[serde(default)]
- pub Appid: bool,
+ Appid: bool,
}
impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
@@ -258,7 +262,7 @@ impl From<PublicKeyCredentialCopy> for PublicKeyCredential {
async fn activate_webauthn(
data: JsonUpcase<EnableWebauthnData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
let data: EnableWebauthnData = data.into_inner().data;
let user = headers.user;
@@ -266,15 +270,15 @@ async fn activate_webauthn(
MasterPasswordHash: data.MasterPasswordHash,
Otp: data.Otp,
}
- .validate(&user, true, &mut conn)
+ .validate(&user, true, &conn)
.await?;
// Retrieve and delete the saved challenge state
let type_ = TwoFactorType::WebauthnRegisterChallenge as i32;
- let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await {
+ let state = match TwoFactor::find_by_user_and_type(&user.uuid, type_, &conn).await {
Some(tf) => {
let state: RegistrationState = serde_json::from_str(&tf.data)?;
- tf.delete(&mut conn).await?;
+ tf.delete(&conn).await?;
state
}
None => err!("Can't recover challenge"),
@@ -285,7 +289,7 @@ async fn activate_webauthn(
WebauthnConfig::load()
.register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?;
- let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &mut conn).await?.1;
+ let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &conn).await?.1;
// TODO: Check for repeated IDs
registrations.push(WebauthnRegistration {
id: data.Id.into_i32()?,
@@ -301,7 +305,7 @@ async fn activate_webauthn(
TwoFactorType::Webauthn,
serde_json::to_string(®istrations)?,
)
- .save(&mut conn)
+ .save(&conn)
.await?;
let keys_json: Vec<Value> = registrations
.iter()
@@ -333,7 +337,7 @@ struct DeleteU2FData {
async fn delete_webauthn(
data: JsonUpcase<DeleteU2FData>,
headers: Headers,
- mut conn: DbConn,
+ conn: DbConn,
) -> JsonResult {
if !headers
.user
@@ -341,19 +345,15 @@ async fn delete_webauthn(
{
err!("Invalid password");
}
- let mut tf = match TwoFactor::find_by_user_and_type(
- &headers.user.uuid,
- TwoFactorType::Webauthn as i32,
- &mut conn,
- )
- .await
- {
- Some(tf) => tf,
- None => err!("Webauthn data not found!"),
+ let Some(mut tf) =
+ TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, &conn)
+ .await
+ else {
+ err!("Webauthn data not found!")
};
let data: Vec<WebauthnRegistration> = serde_json::from_str(&tf.data)?;
tf.data = serde_json::to_string(&data)?;
- tf.save(&mut conn).await?;
+ tf.save(&conn).await?;
drop(tf);
let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect();
Ok(Json(json!({
@@ -363,9 +363,9 @@ async fn delete_webauthn(
})))
}
-pub async fn get_webauthn_registrations(
+async fn get_webauthn_registrations(
user_uuid: &str,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Result<(bool, Vec<WebauthnRegistration>), Error> {
let type_ = TwoFactorType::Webauthn as i32;
match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await {
@@ -374,7 +374,7 @@ pub async fn get_webauthn_registrations(
}
}
-pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> JsonResult {
+pub async fn generate_webauthn_login(user_uuid: &str, conn: &DbConn) -> JsonResult {
// Load saved credentials
let creds: Vec<Credential> = get_webauthn_registrations(user_uuid, conn)
.await?
@@ -410,7 +410,7 @@ pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> Json
pub async fn validate_webauthn_login(
user_uuid: &str,
response: &str,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> EmptyResult {
let type_ = TwoFactorType::WebauthnLoginChallenge as i32;
let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await {
@@ -419,23 +419,14 @@ pub async fn validate_webauthn_login(
tf.delete(conn).await?;
state
}
- None => err!(
- "Can't recover login challenge",
- ErrorEvent {
- event: EventType::UserFailedLogIn2fa
- }
- ),
+ None => err!("Can't recover login challenge"),
};
-
let rsp: crate::util::UpCase<PublicKeyCredentialCopy> = serde_json::from_str(response)?;
let rsp: PublicKeyCredential = rsp.data.into();
-
let mut registrations = get_webauthn_registrations(user_uuid, conn).await?.1;
-
// If the credential we received is migrated from U2F, enable the U2F compatibility
//let use_u2f = registrations.iter().any(|r| r.migrated && r.credential.cred_id == rsp.raw_id.0);
let (cred_id, auth_data) = WebauthnConfig::load().authenticate_credential(&rsp, &state)?;
-
for reg in &mut registrations {
if ®.credential.cred_id == cred_id {
reg.credential.counter = auth_data.counter;
@@ -451,10 +442,5 @@ pub async fn validate_webauthn_login(
}
}
- err!(
- "Credential not present",
- ErrorEvent {
- event: EventType::UserFailedLogIn2fa
- }
- )
+ err!("Credential not present")
}
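
The loop above persists the authenticator's signature counter after every successful assertion. The underlying rule, sketched below: a counter that fails to increase is the standard WebAuthn clone/replay signal (a zero counter means the authenticator doesn't implement one):

// Sketch: update a stored credential counter, flagging replays/clones.
fn update_counter(stored: &mut u32, asserted: u32) -> Result<(), &'static str> {
    // Authenticators without a counter always report zero.
    if asserted != 0 && asserted <= *stored {
        // A counter that did not increase suggests a cloned credential.
        return Err("signature counter did not increase");
    }
    *stored = asserted;
    Ok(())
}
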
diff --git a/src/api/identity.rs b/src/api/identity.rs
@@ -8,12 +8,18 @@ use serde_json::Value;
use crate::{
api::{
- core::accounts::{PreloginData, RegisterData, _prelogin, _register},
+ core::accounts::{PreloginData, RegisterData, _prelogin},
ApiResult, EmptyResult, JsonResult, JsonUpcase,
},
auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp},
config,
- db::{models::*, DbConn},
+ db::{
+ models::{
+ AuthRequest, Device, OrganizationApiKey, TwoFactor, TwoFactorType, User,
+ UserOrganization,
+ },
+ DbConn,
+ },
error::MapResult,
util,
};
@@ -23,17 +29,13 @@ pub fn routes() -> Vec<Route> {
}
#[post("/connect/token", data = "<data>")]
-async fn login(
- data: Form<ConnectData>,
- client_header: ClientHeaders,
- mut conn: DbConn,
-) -> JsonResult {
+async fn login(data: Form<ConnectData>, client_header: ClientHeaders, conn: DbConn) -> JsonResult {
let data: ConnectData = data.into_inner();
let mut user_uuid: Option<String> = None;
let login_result = match data.grant_type.as_ref() {
"refresh_token" => {
_check_is_some(&data.refresh_token, "refresh_token cannot be blank")?;
- _refresh_login(data, &mut conn).await
+ _refresh_login(data, &conn).await
}
"password" => {
_check_is_some(&data.client_id, "client_id cannot be blank")?;
@@ -45,7 +47,7 @@ async fn login(
_check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?;
- _password_login(data, &mut user_uuid, &mut conn, &client_header.ip).await
+ _password_login(data, &mut user_uuid, &conn, &client_header.ip).await
}
"client_credentials" => {
_check_is_some(&data.client_id, "client_id cannot be blank")?;
@@ -56,14 +58,14 @@ async fn login(
_check_is_some(&data.device_name, "device_name cannot be blank")?;
_check_is_some(&data.device_type, "device_type cannot be blank")?;
- _api_key_login(data, &mut user_uuid, &mut conn, &client_header.ip).await
+ _api_key_login(data, &mut user_uuid, &conn, &client_header.ip).await
}
t => err!("Invalid type", t),
};
login_result
}
-async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
+async fn _refresh_login(data: ConnectData, conn: &DbConn) -> JsonResult {
// Extract token
let token = data.refresh_token.unwrap();
@@ -104,7 +106,7 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult {
async fn _password_login(
data: ConnectData,
user_uuid: &mut Option<String>,
- conn: &mut DbConn,
+ conn: &DbConn,
ip: &ClientIp,
) -> JsonResult {
// Validate scope
@@ -115,12 +117,11 @@ async fn _password_login(
let scope_vec = vec!["api".into(), "offline_access".into()];
// Get the user
let username = data.username.as_ref().unwrap().trim();
- let mut user = match User::find_by_mail(username, conn).await {
- Some(user) => user,
- None => err!(
+ let Some(mut user) = User::find_by_mail(username, conn).await else {
+ err!(
"Username or password is incorrect. Try again",
format!("IP: {}. Username: {}.", ip.ip, username)
- ),
+ )
};
// Set the user_uuid here to be passed back and used for event logging.
@@ -135,28 +136,19 @@ async fn _password_login(
if !auth_request.check_access_code(password) {
err!(
"Username or access code is incorrect. Try again",
- format!("IP: {}. Username: {}.", ip.ip, username),
- ErrorEvent {
- event: EventType::UserFailedLogIn,
- }
+ format!("IP: {}. Username: {}.", ip.ip, username)
)
}
} else {
err!(
"Auth request not found. Try again.",
- format!("IP: {}. Username: {}.", ip.ip, username),
- ErrorEvent {
- event: EventType::UserFailedLogIn,
- }
+ format!("IP: {}. Username: {}.", ip.ip, username)
)
}
} else if !user.check_valid_password(password) {
err!(
"Username or password is incorrect. Try again",
- format!("IP: {}. Username: {}.", ip.ip, username),
- ErrorEvent {
- event: EventType::UserFailedLogIn,
- }
+ format!("IP: {}. Username: {}.", ip.ip, username)
)
}
@@ -166,7 +158,7 @@ async fn _password_login(
user.set_password(password, None, false, None);
if let Err(e) = user.save(conn).await {
- panic!("Error updating user: {:#?}", e);
+ panic!("Error updating user: {e:#?}");
}
}
@@ -174,10 +166,7 @@ async fn _password_login(
if !user.enabled {
err!(
"This user has been disabled",
- format!("IP: {}. Username: {}.", ip.ip, username),
- ErrorEvent {
- event: EventType::UserFailedLogIn
- }
+ format!("IP: {}. Username: {}.", ip.ip, username)
)
}
let (mut device, _) = get_device(&data, conn, &user).await;
@@ -219,7 +208,7 @@ async fn _password_login(
async fn _api_key_login(
data: ConnectData,
user_uuid: &mut Option<String>,
- conn: &mut DbConn,
+ conn: &DbConn,
ip: &ClientIp,
) -> JsonResult {
// Validate scope
@@ -233,18 +222,16 @@ async fn _api_key_login(
async fn _user_api_key_login(
data: ConnectData,
user_uuid: &mut Option<String>,
- conn: &mut DbConn,
+ conn: &DbConn,
ip: &ClientIp,
) -> JsonResult {
// Get the user via the client_id
let client_id = data.client_id.as_ref().unwrap();
- let client_user_uuid = match client_id.strip_prefix("user.") {
- Some(uuid) => uuid,
- None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
+ let Some(client_user_uuid) = client_id.strip_prefix("user.") else {
+ err!("Malformed client_id", format!("IP: {}.", ip.ip))
};
- let user = match User::find_by_uuid(client_user_uuid, conn).await {
- Some(user) => user,
- None => err!("Invalid client_id", format!("IP: {}.", ip.ip)),
+ let Some(user) = User::find_by_uuid(client_user_uuid, conn).await else {
+ err!("Invalid client_id", format!("IP: {}.", ip.ip))
};
// Set the user_uuid here to be passed back and used for event logging.
@@ -254,10 +241,7 @@ async fn _user_api_key_login(
if !user.enabled {
err!(
"This user has been disabled (API key login)",
- format!("IP: {}. Username: {}.", ip.ip, user.email),
- ErrorEvent {
- event: EventType::UserFailedLogIn
- }
+ format!("IP: {}. Username: {}.", ip.ip, user.email)
)
}
@@ -266,10 +250,7 @@ async fn _user_api_key_login(
if !user.check_valid_api_key(client_secret) {
err!(
"Incorrect client_secret",
- format!("IP: {}. Username: {}.", ip.ip, user.email),
- ErrorEvent {
- event: EventType::UserFailedLogIn
- }
+ format!("IP: {}. Username: {}.", ip.ip, user.email)
)
}
@@ -307,18 +288,16 @@ async fn _user_api_key_login(
async fn _organization_api_key_login(
data: ConnectData,
- conn: &mut DbConn,
+ conn: &DbConn,
ip: &ClientIp,
) -> JsonResult {
// Get the org via the client_id
let client_id = data.client_id.as_ref().unwrap();
- let org_uuid = match client_id.strip_prefix("organization.") {
- Some(uuid) => uuid,
- None => err!("Malformed client_id", format!("IP: {}.", ip.ip)),
+ let Some(org_uuid) = client_id.strip_prefix("organization.") else {
+ err!("Malformed client_id", format!("IP: {}.", ip.ip))
};
- let org_api_key = match OrganizationApiKey::find_by_org_uuid(org_uuid, conn).await {
- Some(org_api_key) => org_api_key,
- None => err!("Invalid client_id", format!("IP: {}.", ip.ip)),
+ let Some(org_api_key) = OrganizationApiKey::find_by_org_uuid(org_uuid, conn).await else {
+ err!("Invalid client_id", format!("IP: {}.", ip.ip))
};
// Check API key.
@@ -343,7 +322,7 @@ async fn _organization_api_key_login(
}
/// Retrieves an existing device or creates a new device from ConnectData and the User
-async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> (Device, bool) {
+async fn get_device(data: &ConnectData, conn: &DbConn, user: &User) -> (Device, bool) {
// On iOS, device_type sends "iOS", on others it sends a number
// When unknown or unable to parse, return 14, which is 'Unknown Browser'
let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(14);
@@ -352,17 +331,15 @@ async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> (Devi
.clone()
.expect("No device id provided");
let device_name = data.device_name.clone().expect("No device name provided");
-
let mut new_device = false;
// Find device or create new
- let device = match Device::find_by_uuid_and_user(&device_id, &user.uuid, conn).await {
- Some(device) => device,
- None => {
+ let device = (Device::find_by_uuid_and_user(&device_id, &user.uuid, conn).await).map_or_else(
+ || {
new_device = true;
Device::new(device_id, user.uuid.clone(), device_name, device_type)
- }
- };
-
+ },
+ |device| device,
+ );
(device, new_device)
}
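
The new_device flag works because map_or_else runs exactly one of its two closures, so only the miss path flips the flag. A minimal sketch of the fetch-or-create pattern:

// Sketch: fetch-or-create that also reports whether a create happened;
// map_or_else evaluates exactly one of the two closures.
fn get_or_create(existing: Option<String>) -> (String, bool) {
    let mut created = false;
    let value = existing.map_or_else(
        || {
            created = true;
            String::from("new")
        },
        |v| v,
    );
    (value, created)
}
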
@@ -371,7 +348,7 @@ async fn twofactor_auth(
data: &ConnectData,
device: &mut Device,
ip: &ClientIp,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> ApiResult<Option<String>> {
let twofactors = TwoFactor::find_by_user(user_uuid, conn).await;
// No twofactor token if twofactor is disabled
@@ -380,12 +357,11 @@ async fn twofactor_auth(
}
let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect();
let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one
- let twofactor_code = match data.two_factor_token {
- Some(ref code) => code,
- None => err_json!(
+ let Some(ref twofactor_code) = data.two_factor_token else {
+ err_json!(
_json_err_twofactor(&twofactor_ids, user_uuid, conn).await?,
"2FA token not provided"
- ),
+ )
};
let selected_twofactor = twofactors
.into_iter()
@@ -401,17 +377,12 @@ async fn twofactor_auth(
ip,
conn,
)
- .await?
+ .await?;
}
Some(TwoFactorType::Webauthn) => {
- _tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn).await?
+ _tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn).await?;
}
- _ => err!(
- "Invalid two factor provider",
- ErrorEvent {
- event: EventType::UserFailedLogIn2fa
- }
- ),
+ _ => err!("Invalid two factor provider"),
}
device.delete_twofactor_remember();
Ok(None)
@@ -424,7 +395,7 @@ fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> {
async fn _json_err_twofactor(
providers: &[i32],
user_uuid: &str,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> ApiResult<Value> {
use crate::api::core::two_factor;
@@ -438,14 +409,12 @@ async fn _json_err_twofactor(
for provider in providers {
result["TwoFactorProviders2"][provider.to_string()] = Value::Null;
- match TwoFactorType::from_i32(*provider) {
- Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ }
- Some(TwoFactorType::Webauthn) => {
- let request =
- two_factor::webauthn::generate_webauthn_login(user_uuid, conn).await?;
- result["TwoFactorProviders2"][provider.to_string()] = request.0;
- }
- _ => {}
+ if matches!(
+ TwoFactorType::from_i32(*provider),
+ Some(TwoFactorType::Webauthn)
+ ) {
+ let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn).await?;
+ result["TwoFactorProviders2"][provider.to_string()] = request.0;
}
}
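For orientation, the payload this loop builds maps each enabled provider id to extra data. A rough sketch of the shape, omitting the surrounding error fields (provider ids per Bitwarden: 0 = Authenticator, 7 = WebAuthn); illustration only, not code from this diff:

let _shape = serde_json::json!({
    "TwoFactorProviders2": {
        "0": null, // TOTP needs no extra data
        "7": { /* WebAuthn login challenge from generate_webauthn_login */ }
    }
});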
@@ -457,9 +426,10 @@ async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
_prelogin(data, conn).await
}
+#[allow(unused_variables)]
#[post("/accounts/register", data = "<data>")]
-async fn identity_register(data: JsonUpcase<RegisterData>, conn: DbConn) -> JsonResult {
- _register(data, conn).await
+fn identity_register(data: JsonUpcase<RegisterData>, _conn: DbConn) -> JsonResult {
+ err!("No more registerations allowed.")
}
// https://github.com/bitwarden/jslib/blob/master/common/src/models/request/tokenRequest.ts
@@ -499,7 +469,6 @@ struct ConnectData {
#[field(name = uncased("device_type"))]
#[field(name = uncased("devicetype"))]
device_type: Option<String>,
- #[allow(unused)]
#[field(name = uncased("device_push_token"))]
#[field(name = uncased("devicepushtoken"))]
_device_push_token: Option<String>, // Unused; mobile device push not yet supported.
diff --git a/src/api/mod.rs b/src/api/mod.rs
@@ -4,10 +4,6 @@ mod icons;
mod identity;
mod notifications;
mod web;
-
-use rocket::serde::json::Json;
-use serde_json::Value;
-
pub use crate::api::{
admin::catchers as admin_catchers,
admin::routes as admin_routes,
@@ -23,14 +19,15 @@ pub use crate::api::{
},
web::catchers as web_catchers,
web::routes as web_routes,
- web::static_files,
};
use crate::db::{models::User, DbConn};
use crate::util;
+use rocket::serde::json::Json;
+use serde_json::Value;
// Type aliases for API methods results
type ApiResult<T> = Result<T, crate::error::Error>;
-pub type JsonResult = ApiResult<Json<Value>>;
+type JsonResult = ApiResult<Json<Value>>;
pub type EmptyResult = ApiResult<()>;
type JsonUpcase<T> = Json<util::UpCase<T>>;
@@ -49,7 +46,7 @@ impl PasswordOrOtpData {
    /// Tokens used via this struct can be used multiple times during the process:
    /// first for the validation to continue, and afterwards to enable or validate follow-up actions.
    /// This differs per caller, so whether the token is deleted can be adjusted.
- pub async fn validate(&self, user: &User, delete_if_valid: bool, conn: &mut DbConn) -> EmptyResult {
+ async fn validate(&self, user: &User, delete_if_valid: bool, conn: &DbConn) -> EmptyResult {
use crate::api::core::two_factor::protected_actions::validate_protected_action_otp;
match (self.MasterPasswordHash.as_deref(), self.Otp.as_deref()) {
@@ -77,8 +74,8 @@ enum NumberOrString {
impl NumberOrString {
fn into_string(self) -> String {
match self {
- NumberOrString::Number(n) => n.to_string(),
- NumberOrString::String(s) => s,
+ Self::Number(n) => n.to_string(),
+ Self::String(s) => s,
}
}
@@ -86,8 +83,8 @@ impl NumberOrString {
fn into_i32(&self) -> ApiResult<i32> {
use std::num::ParseIntError as PIE;
match self {
- NumberOrString::Number(n) => Ok(*n),
- NumberOrString::String(s) => s
+ Self::Number(n) => Ok(*n),
+ Self::String(s) => s
.parse()
.map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string())),
}
diff --git a/src/api/notifications.rs b/src/api/notifications.rs
@@ -31,8 +31,7 @@ fn ws_users() -> &'static Arc<WebSocketUsers> {
.get()
.expect("WS_USERS should be initialized in main")
}
-pub static WS_ANONYMOUS_SUBSCRIPTIONS: OnceLock<Arc<AnonymousWebSocketSubscriptions>> =
- OnceLock::new();
+static WS_ANONYMOUS_SUBSCRIPTIONS: OnceLock<Arc<AnonymousWebSocketSubscriptions>> = OnceLock::new();
#[inline]
pub fn init_ws_anonymous_subscriptions() {
if WS_ANONYMOUS_SUBSCRIPTIONS
@@ -184,7 +183,7 @@ fn websockets_hub<'r>(
}}
})
}
-
+#[allow(clippy::unnecessary_wraps)]
#[get("/anonymous-hub?<token..>")]
fn anonymous_websockets_hub<'r>(
ws: rocket_ws::WebSocket,
@@ -251,33 +250,26 @@ fn anonymous_websockets_hub<'r>(
//
// Websockets server
//
-
+#[allow(clippy::cast_possible_truncation)]
fn serialize(val: Value) -> Vec<u8> {
use rmpv::encode::write_value;
-
let mut buf = Vec::new();
write_value(&mut buf, &val).expect("Error encoding MsgPack");
-
// Add size bytes at the start
// Extracted from BinaryMessageFormat.js
let mut size: usize = buf.len();
let mut len_buf: Vec<u8> = Vec::new();
-
loop {
let mut size_part = size & 0x7f;
size >>= 7;
-
if size > 0 {
size_part |= 0x80;
}
-
len_buf.push(size_part as u8);
-
if size == 0 {
break;
}
}
-
len_buf.append(&mut buf);
len_buf
}
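The prefix loop above emits a little-endian base-128 varint, as in SignalR's BinaryMessageFormat. A minimal sketch of the inverse operation, with a hypothetical helper name that is not part of this codebase:

// Decode the length prefix written by `serialize`; returns the payload
// length and the number of prefix bytes consumed.
fn read_length(buf: &[u8]) -> Option<(usize, usize)> {
    let mut size: usize = 0;
    let mut shift = 0;
    for (i, b) in buf.iter().enumerate() {
        size |= usize::from(b & 0x7f) << shift;
        if b & 0x80 == 0 {
            return Some((size, i + 1));
        }
        shift += 7;
    }
    None // truncated prefix
}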
@@ -286,19 +278,14 @@ fn serialize_date(date: NaiveDateTime) -> Value {
let seconds: i64 = date.timestamp();
let nanos: i64 = date.timestamp_subsec_nanos().into();
let timestamp = nanos << 34 | seconds;
-
let bs = timestamp.to_be_bytes();
-
// -1 is Timestamp
// https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type
Value::Ext(-1, bs.to_vec())
}
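`serialize_date` packs the MessagePack timestamp extension: nanoseconds in the upper 30 bits, seconds in the lower 34. A sketch of the inverse split, hypothetical and for illustration only (nanos always fit, since subsecond nanos < 10^9 < 2^30):

// Inverse of the packing above (msgpack timestamp 64 layout).
fn split_timestamp(timestamp: i64) -> (i64, u32) {
    let seconds = timestamp & ((1 << 34) - 1); // low 34 bits
    let nanos = (timestamp >> 34) as u32; // high 30 bits
    (seconds, nanos)
}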
fn convert_option<T: Into<Value>>(option: Option<T>) -> Value {
- match option {
- Some(a) => a.into(),
- None => Value::Nil,
- }
+ option.map_or(Value::Nil, core::convert::Into::into)
}
const RECORD_SEPARATOR: u8 = 0x1e;
@@ -325,7 +312,7 @@ pub struct WebSocketUsers {
impl WebSocketUsers {
async fn send_update(&self, user_uuid: &str, data: &[u8]) {
if let Some(user) = self.map.get(user_uuid).map(|v| v.clone()) {
- for (_, sender) in user.iter() {
+ for (_, sender) in &user {
_ = sender.send(Message::binary(data)).await;
}
}
@@ -363,7 +350,7 @@ impl WebSocketUsers {
ut: UpdateType,
folder: &Folder,
acting_device_uuid: &String,
- _: &mut DbConn,
+ _: &DbConn,
) {
let data = create_update(
vec![
@@ -385,31 +372,32 @@ impl WebSocketUsers {
user_uuids: &[String],
acting_device_uuid: &String,
collection_uuids: Option<Vec<String>>,
- _: &mut DbConn,
+ _: &DbConn,
) {
let org_uuid = convert_option(cipher.organization_uuid.clone());
        // Depending on whether collections are provided, the following variables need different values.
        // The user_uuid should be `null` and the revision date set to now, or else the clients won't sync the collection change.
- let (user_uuid, collection_uuids, revision_date) =
- if let Some(collection_uuids) = collection_uuids {
+ let (user_uuid, collection_uuids, revision_date) = collection_uuids.map_or_else(
+ || {
+ (
+ convert_option(cipher.user_uuid.clone()),
+ Value::Nil,
+ serialize_date(cipher.updated_at),
+ )
+ },
+ |collection_uuids| {
(
Value::Nil,
Value::Array(
collection_uuids
.into_iter()
- .map(|v| v.into())
+ .map(core::convert::Into::into)
.collect::<Vec<rmpv::Value>>(),
),
serialize_date(Utc::now().naive_utc()),
)
- } else {
- (
- convert_option(cipher.user_uuid.clone()),
- Value::Nil,
- serialize_date(cipher.updated_at),
- )
- };
-
+ },
+ );
let data = create_update(
vec![
("Id".into(), cipher.uuid.clone().into()),
@@ -432,7 +420,7 @@ impl WebSocketUsers {
user_uuid: &str,
auth_request_uuid: &str,
acting_device_uuid: &str,
- _: &mut DbConn,
+ _: &DbConn,
) {
let data = create_update(
vec![
@@ -450,7 +438,7 @@ impl WebSocketUsers {
user_uuid: &str,
auth_response_uuid: &str,
approving_device_uuid: String,
- _: &mut DbConn,
+ _: &DbConn,
) {
let data = create_update(
vec![
@@ -510,7 +498,6 @@ fn create_update(
acting_device_uuid: Option<String>,
) -> Vec<u8> {
use rmpv::Value as V;
-
let value = V::Array(vec![
1.into(),
V::Map(vec![]),
@@ -519,15 +506,12 @@ fn create_update(
V::Array(vec![V::Map(vec![
(
"ContextId".into(),
- acting_device_uuid
- .map(|v| v.into())
- .unwrap_or_else(|| V::Nil),
+ acting_device_uuid.map_or(V::Nil, core::convert::Into::into),
),
("Type".into(), (ut as i32).into()),
("Payload".into(), payload.into()),
])]),
]);
-
serialize(value)
}
@@ -565,23 +549,18 @@ pub enum UpdateType {
SyncLoginDelete = 2,
SyncFolderDelete = 3,
SyncCiphers = 4,
-
SyncVault = 5,
SyncOrgKeys = 6,
SyncFolderCreate = 7,
SyncFolderUpdate = 8,
SyncCipherDelete = 9,
SyncSettings = 10,
-
LogOut = 11,
-
SyncSendCreate = 12,
SyncSendUpdate = 13,
SyncSendDelete = 14,
-
AuthRequest = 15,
AuthRequestResponse = 16,
-
None = 100,
}
diff --git a/src/api/web.rs b/src/api/web.rs
@@ -28,8 +28,8 @@ pub fn catchers() -> Vec<Catcher> {
}
#[catch(404)]
-fn not_found() -> &'static str {
- "Admin panel or web vault is permanently disabled."
+const fn not_found() -> &'static str {
+ "Web vault is permanently disabled."
}
#[get("/")]
@@ -41,9 +41,9 @@ async fn web_index() -> Cached<Option<NamedFile>> {
false,
)
}
-
+#[allow(clippy::unnecessary_wraps)]
#[head("/")]
-fn web_index_head() -> EmptyResult {
+const fn web_index_head() -> EmptyResult {
// Add an explicit HEAD route to prevent uptime monitoring services from
// generating "No matching routes for HEAD /" error messages.
//
@@ -119,7 +119,7 @@ use crate::db::DbConn;
fn alive(_conn: DbConn) -> Json<String> {
now()
}
-
+#[allow(clippy::unnecessary_wraps)]
#[head("/alive")]
fn alive_head(_conn: DbConn) -> EmptyResult {
// Avoid logging spurious "No matching routes for HEAD /alive" errors
@@ -127,13 +127,9 @@ fn alive_head(_conn: DbConn) -> EmptyResult {
Ok(())
}
#[get("/vw_static/<filename>", rank = 2)]
-pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Error> {
+fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Error> {
match filename {
"404.png" => Ok((ContentType::PNG, include_bytes!("../static/images/404.png"))),
- "mail-github.png" => Ok((
- ContentType::PNG,
- include_bytes!("../static/images/mail-github.png"),
- )),
"logo-gray.png" => Ok((
ContentType::PNG,
include_bytes!("../static/images/logo-gray.png"),
@@ -142,10 +138,6 @@ pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Erro
ContentType::SVG,
include_bytes!("../static/images/error-x.svg"),
)),
- "hibp.png" => Ok((
- ContentType::PNG,
- include_bytes!("../static/images/hibp.png"),
- )),
"vaultwarden-icon.png" => Ok((
ContentType::PNG,
include_bytes!("../static/images/vaultwarden-icon.png"),
@@ -158,30 +150,6 @@ pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Erro
ContentType::CSS,
include_bytes!("../static/scripts/404.css"),
)),
- "admin.css" => Ok((
- ContentType::CSS,
- include_bytes!("../static/scripts/admin.css"),
- )),
- "admin.js" => Ok((
- ContentType::JavaScript,
- include_bytes!("../static/scripts/admin.js"),
- )),
- "admin_settings.js" => Ok((
- ContentType::JavaScript,
- include_bytes!("../static/scripts/admin_settings.js"),
- )),
- "admin_users.js" => Ok((
- ContentType::JavaScript,
- include_bytes!("../static/scripts/admin_users.js"),
- )),
- "admin_organizations.js" => Ok((
- ContentType::JavaScript,
- include_bytes!("../static/scripts/admin_organizations.js"),
- )),
- "admin_diagnostics.js" => Ok((
- ContentType::JavaScript,
- include_bytes!("../static/scripts/admin_diagnostics.js"),
- )),
"bootstrap.css" => Ok((
ContentType::CSS,
include_bytes!("../static/scripts/bootstrap.css"),
diff --git a/src/auth.rs b/src/auth.rs
@@ -18,7 +18,7 @@ static DEFAULT_VALIDITY: OnceLock<Duration> = OnceLock::new();
fn init_default_validity() {
DEFAULT_VALIDITY
.set(Duration::hours(2))
- .expect("DEFAULT_VALIDITY must only be initialized once")
+ .expect("DEFAULT_VALIDITY must only be initialized once");
}
#[inline]
pub fn get_default_validity() -> &'static Duration {
@@ -31,7 +31,7 @@ static JWT_HEADER: OnceLock<Header> = OnceLock::new();
fn init_jwt_header() {
JWT_HEADER
.set(Header::new(JWT_ALGORITHM))
- .expect("JWT_HEADER must only be initialized once")
+ .expect("JWT_HEADER must only be initialized once");
}
#[inline]
fn get_jwt_header() -> &'static Header {
@@ -39,12 +39,12 @@ fn get_jwt_header() -> &'static Header {
.get()
.expect("JWT_HEADER must be initialized in main")
}
-pub static JWT_LOGIN_ISSUER: OnceLock<String> = OnceLock::new();
+static JWT_LOGIN_ISSUER: OnceLock<String> = OnceLock::new();
#[inline]
fn init_jwt_login_issuer() {
JWT_LOGIN_ISSUER
.set(format!("{}|login", config::get_config().domain_origin()))
- .expect("JWT_LOGIN_ISSUER must only be initialized once")
+ .expect("JWT_LOGIN_ISSUER must only be initialized once");
}
#[inline]
pub fn get_jwt_login_issuer() -> &'static str {
@@ -58,7 +58,7 @@ static JWT_INVITE_ISSUER: OnceLock<String> = OnceLock::new();
fn init_jwt_invite_issuer() {
JWT_INVITE_ISSUER
.set(format!("{}|invite", config::get_config().domain_origin()))
- .expect("JWT_INVITE_ISSUER must only be initialized once")
+ .expect("JWT_INVITE_ISSUER must only be initialized once");
}
#[inline]
fn get_jwt_invite_issuer() -> &'static str {
@@ -72,7 +72,7 @@ static JWT_DELETE_ISSUER: OnceLock<String> = OnceLock::new();
fn init_jwt_delete_issuer() {
JWT_DELETE_ISSUER
.set(format!("{}|delete", config::get_config().domain_origin()))
- .expect("JWT_DELETE_ISSUER must only be initialized once")
+ .expect("JWT_DELETE_ISSUER must only be initialized once");
}
#[inline]
fn get_jwt_delete_issuer() -> &'static str {
@@ -89,7 +89,7 @@ fn init_jwt_verifyemail_issuer() {
"{}|verifyemail",
config::get_config().domain_origin()
))
- .expect("JWT_VERIFYEMAIL_ISSUER must only be initialized once")
+ .expect("JWT_VERIFYEMAIL_ISSUER must only be initialized once");
}
#[inline]
fn get_jwt_verifyemail_issuer() -> &'static str {
@@ -106,7 +106,7 @@ fn init_jwt_org_api_key_issuer() {
"{}|api.organization",
config::get_config().domain_origin()
))
- .expect("JWT_ORG_API_KEY_ISSUER must only be initialized once")
+ .expect("JWT_ORG_API_KEY_ISSUER must only be initialized once");
}
#[inline]
fn get_jwt_org_api_key_issuer() -> &'static str {
@@ -123,7 +123,7 @@ fn init_jwt_file_download_issuer() {
"{}|file_download",
config::get_config().domain_origin()
))
- .expect("JWT_FILE_DOWNLOAD_ISSUER must only be initialized once")
+ .expect("JWT_FILE_DOWNLOAD_ISSUER must only be initialized once");
}
#[inline]
fn get_jwt_file_download_issuer() -> &'static str {
@@ -135,7 +135,7 @@ fn get_jwt_file_download_issuer() -> &'static str {
static RSA_KEYS: OnceLock<(EncodingKey, DecodingKey)> = OnceLock::new();
#[allow(clippy::verbose_file_reads)]
#[inline]
-pub fn init_rsa_keys() -> Result<(), Error> {
+fn init_rsa_keys() -> Result<(), Error> {
let mut priv_file = File::options()
.create(true)
.read(true)
@@ -185,6 +185,7 @@ pub fn init_values() {
init_jwt_verifyemail_issuer();
init_jwt_org_api_key_issuer();
init_jwt_file_download_issuer();
+ init_rsa_keys().expect("error creating or reading RSA keys");
}
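Every issuer above follows the same OnceLock discipline: set exactly once from main, read thereafter. A generic sketch of the pattern with hypothetical names:

use std::sync::OnceLock;

static ISSUER: OnceLock<String> = OnceLock::new();

// Called once during startup; panics on a second call.
fn init_issuer() {
    ISSUER
        .set(String::from("example|login"))
        .expect("ISSUER must only be initialized once");
}

// Called from handlers; panics if initialization never ran.
fn get_issuer() -> &'static str {
    ISSUER.get().expect("ISSUER must be initialized in main")
}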
pub fn encode_jwt<T: Serialize>(claims: &T) -> String {
match jsonwebtoken::encode(get_jwt_header(), claims, get_private_rsa_key()) {
@@ -246,17 +247,14 @@ pub struct LoginJwtClaims {
pub iss: String,
// Subject
pub sub: String,
-
pub premium: bool,
pub name: String,
pub email: String,
pub email_verified: bool,
-
pub orgowner: Vec<String>,
pub orgadmin: Vec<String>,
pub orguser: Vec<String>,
pub orgmanager: Vec<String>,
-
// user security_stamp
pub sstamp: String,
// device uuid
@@ -270,35 +268,33 @@ pub struct LoginJwtClaims {
#[derive(Debug, Serialize, Deserialize)]
pub struct InviteJwtClaims {
// Not before
- pub nbf: i64,
+ nbf: i64,
// Expiration time
- pub exp: i64,
+ exp: i64,
// Issuer
- pub iss: String,
+ iss: String,
// Subject
- pub sub: String,
-
+ sub: String,
pub email: String,
pub org_id: Option<String>,
pub user_org_id: Option<String>,
- pub invited_by_email: Option<String>,
+ invited_by_email: Option<String>,
}
#[derive(Debug, Serialize, Deserialize)]
-pub struct EmergencyAccessInviteJwtClaims {
+struct EmergencyAccessInviteJwtClaims {
// Not before
- pub nbf: i64,
+ nbf: i64,
// Expiration time
- pub exp: i64,
+ exp: i64,
// Issuer
- pub iss: String,
+ iss: String,
// Subject
- pub sub: String,
-
- pub email: String,
- pub emer_id: String,
- pub grantor_name: String,
- pub grantor_email: String,
+ sub: String,
+ email: String,
+ emer_id: String,
+ grantor_name: String,
+ grantor_email: String,
}
#[derive(Debug, Serialize, Deserialize)]
@@ -311,7 +307,6 @@ pub struct OrgApiKeyLoginJwtClaims {
pub iss: String,
// Subject
pub sub: String,
-
pub client_id: String,
pub client_sub: String,
pub scope: Vec<String>,
@@ -336,36 +331,24 @@ pub fn generate_organization_api_key_login_claims(
#[derive(Debug, Serialize, Deserialize)]
pub struct FileDownloadClaims {
// Not before
- pub nbf: i64,
+ nbf: i64,
// Expiration time
- pub exp: i64,
+ exp: i64,
// Issuer
- pub iss: String,
+ iss: String,
// Subject
pub sub: String,
-
pub file_id: String,
}
-pub fn generate_file_download_claims(uuid: String, file_id: String) -> FileDownloadClaims {
- let time_now = Utc::now().naive_utc();
- FileDownloadClaims {
- nbf: time_now.timestamp(),
- exp: (time_now + Duration::minutes(5)).timestamp(),
- iss: get_jwt_file_download_issuer().to_string(),
- sub: uuid,
- file_id,
- }
-}
-
#[derive(Debug, Serialize, Deserialize)]
pub struct BasicJwtClaims {
// Not before
- pub nbf: i64,
+ nbf: i64,
// Expiration time
- pub exp: i64,
+ exp: i64,
// Issuer
- pub iss: String,
+ iss: String,
// Subject
pub sub: String,
}
@@ -391,7 +374,7 @@ impl<'r> FromRequest<'r> for Host {
type Error = &'static str;
async fn from_request(_: &'r Request<'_>) -> Outcome<Self, Self::Error> {
- Outcome::Success(Host {
+ Outcome::Success(Self {
host: config::get_config().domain.to_string(),
})
}
@@ -409,18 +392,16 @@ impl<'r> FromRequest<'r> for ClientHeaders {
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let host = try_outcome!(Host::from_request(request).await).host;
- let ip = match ClientIp::from_request(request).await {
- Outcome::Success(ip) => ip,
- _ => err_handler!("Error getting Client IP"),
+ let Outcome::Success(ip) = ClientIp::from_request(request).await else {
+ err_handler!("Error getting Client IP")
};
// When unknown or unable to parse, return 14, which is 'Unknown Browser'
let device_type: i32 = request
.headers()
.get_one("device-type")
- .map(|d| d.parse().unwrap_or(14))
- .unwrap_or_else(|| 14);
+ .map_or(14, |d| d.parse().unwrap_or(14));
- Outcome::Success(ClientHeaders {
+ Outcome::Success(Self {
host,
device_type,
ip,
@@ -438,14 +419,11 @@ pub struct Headers {
#[rocket::async_trait]
impl<'r> FromRequest<'r> for Headers {
type Error = &'static str;
-
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = request.headers();
-
let host = try_outcome!(Host::from_request(request).await).host;
- let ip = match ClientIp::from_request(request).await {
- Outcome::Success(ip) => ip,
- _ => err_handler!("Error getting Client IP"),
+ let Outcome::Success(ip) = ClientIp::from_request(request).await else {
+ err_handler!("Error getting Client IP")
};
// Get access_token
@@ -458,39 +436,33 @@ impl<'r> FromRequest<'r> for Headers {
};
// Check JWT token is valid and get device and user from it
- let claims = match decode_login(access_token) {
- Ok(claims) => claims,
- Err(_) => err_handler!("Invalid claim"),
+ let Ok(claims) = decode_login(access_token) else {
+ err_handler!("Invalid claim")
};
let device_uuid = claims.device;
let user_uuid = claims.sub;
- let mut conn = match DbConn::from_request(request).await {
- Outcome::Success(conn) => conn,
- _ => err_handler!("Error getting DB"),
+ let Outcome::Success(conn) = DbConn::from_request(request).await else {
+ err_handler!("Error getting DB")
};
- let device = match Device::find_by_uuid_and_user(&device_uuid, &user_uuid, &mut conn).await
- {
- Some(device) => device,
- None => err_handler!("Invalid device id"),
+ let Some(device) = Device::find_by_uuid_and_user(&device_uuid, &user_uuid, &conn).await
+ else {
+ err_handler!("Invalid device id")
};
- let user = match User::find_by_uuid(&user_uuid, &mut conn).await {
- Some(user) => user,
- None => err_handler!("Device has no user associated"),
+ let Some(user) = User::find_by_uuid(&user_uuid, &conn).await else {
+ err_handler!("Device has no user associated")
};
-
if user.security_stamp != claims.sstamp {
if let Some(stamp_exception) = user
.stamp_exception
.as_deref()
.and_then(|s| serde_json::from_str::<UserStampException>(s).ok())
{
- let current_route = match request.route().and_then(|r| r.name.as_deref()) {
- Some(name) => name,
- _ => err_handler!("Error getting current route for stamp exception"),
+ let Some(current_route) = request.route().and_then(|r| r.name.as_deref()) else {
+ err_handler!("Error getting current route for stamp exception")
};
// Check if the stamp exception has expired first.
@@ -501,7 +473,7 @@ impl<'r> FromRequest<'r> for Headers {
// This prevents checking this stamp exception for new requests.
let mut user = user;
user.reset_stamp_exception();
- _ = user.save(&mut conn).await;
+ _ = user.save(&conn).await;
err_handler!("Stamp exception is expired")
} else if !stamp_exception.routes.contains(¤t_route.to_string()) {
err_handler!(
@@ -515,7 +487,7 @@ impl<'r> FromRequest<'r> for Headers {
}
}
- Outcome::Success(Headers {
+ Outcome::Success(Self {
host,
device,
user,
@@ -525,13 +497,13 @@ impl<'r> FromRequest<'r> for Headers {
}
pub struct OrgHeaders {
- pub host: String,
- pub device: Device,
- pub user: User,
- pub org_user_type: UserOrgType,
- pub org_user: UserOrganization,
+ host: String,
+ device: Device,
+ user: User,
+ org_user_type: UserOrgType,
+ org_user: UserOrganization,
pub org_id: String,
- pub ip: ClientIp,
+ ip: ClientIp,
}
#[rocket::async_trait]
@@ -563,28 +535,28 @@ impl<'r> FromRequest<'r> for OrgHeaders {
match url_org_id {
Some(org_id) => {
- let mut conn = match DbConn::from_request(request).await {
- Outcome::Success(conn) => conn,
- _ => err_handler!("Error getting DB"),
- };
-
let user = headers.user;
- let org_user =
- match UserOrganization::find_by_user_and_org(&user.uuid, org_id, &mut conn)
- .await
- {
- Some(user) => {
- if user.status == UserOrgStatus::Confirmed as i32 {
- user
- } else {
- err_handler!(
+ let org_user = match DbConn::from_request(request).await {
+ Outcome::Success(conn) => {
+ match UserOrganization::find_by_user_and_org(&user.uuid, org_id, &conn)
+ .await
+ {
+ Some(user) => {
+ if user.status == UserOrgStatus::Confirmed as i32 {
+ user
+ } else {
+ err_handler!(
"The current user isn't confirmed member of the organization"
)
+ }
+ }
+ None => {
+ err_handler!("The current user isn't member of the organization")
}
}
- None => err_handler!("The current user isn't member of the organization"),
- };
-
+ }
+ _ => err_handler!("Error getting DB"),
+ };
Outcome::Success(Self {
host: headers.host,
device: headers.device,
@@ -609,11 +581,11 @@ impl<'r> FromRequest<'r> for OrgHeaders {
pub struct AdminHeaders {
pub host: String,
- pub device: Device,
+ device: Device,
pub user: User,
pub org_user_type: UserOrgType,
pub client_version: Option<String>,
- pub ip: ClientIp,
+ ip: ClientIp,
}
#[rocket::async_trait]
@@ -642,8 +614,8 @@ impl<'r> FromRequest<'r> for AdminHeaders {
}
impl From<AdminHeaders> for Headers {
- fn from(h: AdminHeaders) -> Headers {
- Headers {
+ fn from(h: AdminHeaders) -> Self {
+ Self {
host: h.host,
device: h.device,
user: h.user,
@@ -675,11 +647,11 @@ fn get_col_id(request: &Request<'_>) -> Option<String> {
/// and have access to the specific collection provided via the <col_id>/collections/collectionId.
/// This does strict checking on the collection_id; ManagerHeadersLoose does not.
pub struct ManagerHeaders {
- pub host: String,
- pub device: Device,
+ host: String,
+ device: Device,
pub user: User,
pub org_user_type: UserOrgType,
- pub ip: ClientIp,
+ ip: ClientIp,
}
#[rocket::async_trait]
@@ -691,12 +663,11 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
if headers.org_user_type >= UserOrgType::Manager {
match get_col_id(request) {
Some(col_id) => {
- let mut conn = match DbConn::from_request(request).await {
- Outcome::Success(conn) => conn,
- _ => err_handler!("Error getting DB"),
+ let Outcome::Success(conn) = DbConn::from_request(request).await else {
+ err_handler!("Error getting DB")
};
- if !can_access_collection(&headers.org_user, &col_id, &mut conn).await {
+ if !can_access_collection(&headers.org_user, &col_id, &conn).await {
err_handler!("The current user isn't a manager for this collection")
}
}
@@ -717,8 +688,8 @@ impl<'r> FromRequest<'r> for ManagerHeaders {
}
impl From<ManagerHeaders> for Headers {
- fn from(h: ManagerHeaders) -> Headers {
- Headers {
+ fn from(h: ManagerHeaders) -> Self {
+ Self {
host: h.host,
device: h.device,
user: h.user,
@@ -730,12 +701,12 @@ impl From<ManagerHeaders> for Headers {
/// The ManagerHeadersLoose is used when you need to be at least a Manager,
/// but there is no collection_id sent with the request (either in the path or as form data).
pub struct ManagerHeadersLoose {
- pub host: String,
- pub device: Device,
+ host: String,
+ device: Device,
pub user: User,
pub org_user: UserOrganization,
- pub org_user_type: UserOrgType,
- pub ip: ClientIp,
+ org_user_type: UserOrgType,
+ ip: ClientIp,
}
#[rocket::async_trait]
@@ -760,8 +731,8 @@ impl<'r> FromRequest<'r> for ManagerHeadersLoose {
}
impl From<ManagerHeadersLoose> for Headers {
- fn from(h: ManagerHeadersLoose) -> Headers {
- Headers {
+ fn from(h: ManagerHeadersLoose) -> Self {
+ Self {
host: h.host,
device: h.device,
user: h.user,
@@ -769,11 +740,7 @@ impl From<ManagerHeadersLoose> for Headers {
}
}
}
-async fn can_access_collection(
- org_user: &UserOrganization,
- col_id: &str,
- conn: &mut DbConn,
-) -> bool {
+async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &DbConn) -> bool {
org_user.has_full_access()
|| Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn)
.await
@@ -783,8 +750,8 @@ impl ManagerHeaders {
pub async fn from_loose(
h: ManagerHeadersLoose,
collections: &Vec<String>,
- conn: &mut DbConn,
- ) -> Result<ManagerHeaders, Error> {
+ conn: &DbConn,
+ ) -> Result<Self, Error> {
for col_id in collections {
if uuid::Uuid::parse_str(col_id).is_err() {
err!("Collection Id is malformed!");
@@ -794,7 +761,7 @@ impl ManagerHeaders {
}
}
- Ok(ManagerHeaders {
+ Ok(Self {
host: h.host,
device: h.device,
user: h.user,
@@ -845,20 +812,18 @@ impl<'r> FromRequest<'r> for ClientIp {
async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let ip = req.headers().get_one("X-Real-IP").and_then(|ip| {
- match ip.find(',') {
- Some(idx) => &ip[..idx],
- None => ip,
- }
- .parse()
- .map_err(|_| warn!("'X-Real-IP' header is malformed: {ip}"))
- .ok()
+ ip.find(',')
+ .map_or(ip, |idx| &ip[..idx])
+ .parse()
+ .map_err(|_| warn!("'X-Real-IP' header is malformed: {ip}"))
+ .ok()
});
let ip = ip
.or_else(|| req.remote().map(|r| r.ip()))
.unwrap_or_else(|| "0.0.0.0".parse().unwrap());
- Outcome::Success(ClientIp { ip })
+ Outcome::Success(Self { ip })
}
}
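The closure above keeps only the first entry of a comma-separated X-Real-IP value (the client-most hop) before parsing. The same extraction in isolation, as a hedged sketch:

use std::net::IpAddr;

// e.g. first_ip("203.0.113.7, 10.0.0.1") parses as 203.0.113.7.
fn first_ip(header: &str) -> Option<IpAddr> {
    header
        .find(',')
        .map_or(header, |idx| &header[..idx])
        .parse()
        .ok()
}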
@@ -872,13 +837,9 @@ impl<'r> FromRequest<'r> for WsAccessTokenHeader {
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
let headers = request.headers();
-
- // Get access_token
- let access_token = match headers.get_one("Authorization") {
- Some(a) => a.rsplit("Bearer ").next().map(String::from),
- None => None,
- };
-
+ let access_token = headers
+ .get_one("Authorization")
+ .and_then(|a| a.rsplit("Bearer ").next().map(String::from));
Outcome::Success(Self { access_token })
}
}
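`rsplit` walks segments from the right, so `.next()` yields whatever follows the last "Bearer " prefix, or the whole header when the prefix is absent. For example:

assert_eq!("Bearer abc123".rsplit("Bearer ").next(), Some("abc123"));
assert_eq!("abc123".rsplit("Bearer ").next(), Some("abc123"));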
diff --git a/src/config.rs b/src/config.rs
@@ -15,7 +15,7 @@ static CONFIG: OnceLock<Config> = OnceLock::new();
pub fn init_config(cur_dir: &mut PathBuf) {
CONFIG
.set(Config::load(cur_dir).expect("valid TOML config file at 'config.toml'"))
- .expect("CONFIG must only be initialized once")
+ .expect("CONFIG must only be initialized once");
}
#[inline]
pub fn get_config() -> &'static Config {
@@ -27,6 +27,7 @@ pub enum ConfigErr {
De(de::Error),
Url(ParseError),
BadDomain,
+ InvalidPasswordIterations(i32),
}
impl Display for ConfigErr {
#[inline]
@@ -38,6 +39,10 @@ impl Display for ConfigErr {
Self::BadDomain => f.write_str(
"https://<domain>:<port> was unable to be parsed into a URL with a domain",
),
+ Self::InvalidPasswordIterations(count) => write!(
+ f,
+ "password iterations is {count} but must be at least 100000"
+ ),
}
}
}
@@ -74,11 +79,9 @@ struct ConfigFile {
db_connection_retries: Option<NonZeroU8>,
domain: String,
ip: IpAddr,
- org_attachment_limit: Option<u32>,
password_iterations: Option<i32>,
port: u16,
tls: Tls,
- user_attachment_limit: Option<u32>,
web_vault_enabled: Option<bool>,
workers: Option<NonZeroU8>,
}
@@ -88,10 +91,8 @@ pub struct Config {
pub database_timeout: u16,
pub db_connection_retries: NonZeroU8,
pub domain: Url,
- pub org_attachment_limit: Option<u32>,
pub password_iterations: i32,
pub rocket: rocket::Config,
- pub user_attachment_limit: Option<u32>,
pub web_vault_enabled: bool,
}
impl Config {
@@ -156,10 +157,16 @@ impl Config {
.db_connection_retries
.unwrap_or(NonZeroU8::new(15).unwrap()),
domain,
- org_attachment_limit: config_file.org_attachment_limit,
- password_iterations: config_file.password_iterations.unwrap_or(600_000),
+ password_iterations: match config_file.password_iterations {
+ None => 600_000,
+ Some(count) => {
+ if count < 100_000 {
+ return Err(ConfigErr::InvalidPasswordIterations(count));
+ }
+ count
+ }
+ },
rocket,
- user_attachment_limit: config_file.user_attachment_limit,
web_vault_enabled: config_file.web_vault_enabled.unwrap_or(true),
})
}
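The iteration check in isolation; a minimal sketch with a hypothetical free function, mirroring the default of 600,000 and the 100,000 floor above:

fn validate_iterations(configured: Option<i32>) -> Result<i32, String> {
    match configured {
        // Default when the config file leaves it unset.
        None => Ok(600_000),
        Some(count) if count < 100_000 => Err(format!(
            "password iterations is {count} but must be at least 100000"
        )),
        Some(count) => Ok(count),
    }
}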
diff --git a/src/crypto.rs b/src/crypto.rs
@@ -1,10 +1,6 @@
-//
-// PBKDF2 derivation
-//
-use std::num::NonZeroU32;
-
-use data_encoding::{Encoding, HEXLOWER};
+use data_encoding::Encoding;
use ring::{digest, pbkdf2};
+use std::num::NonZeroU32;
static DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256;
const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN;
@@ -44,7 +40,7 @@ pub fn encode_random_bytes<const N: usize>(e: Encoding) -> String {
}
/// Generates a random string over a specified alphabet.
-pub fn get_random_string(alphabet: &[u8], num_chars: usize) -> String {
+fn get_random_string(alphabet: &[u8], num_chars: usize) -> String {
// Ref: https://rust-lang-nursery.github.io/rust-cookbook/algorithms/randomness.html
use rand::Rng;
let mut rng = rand::thread_rng();
@@ -58,22 +54,13 @@ pub fn get_random_string(alphabet: &[u8], num_chars: usize) -> String {
}
/// Generates a random alphanumeric string.
-pub fn get_random_string_alphanum(num_chars: usize) -> String {
+fn get_random_string_alphanum(num_chars: usize) -> String {
const ALPHABET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
abcdefghijklmnopqrstuvwxyz\
0123456789";
get_random_string(ALPHABET, num_chars)
}
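For reference, the sampling inside `get_random_string` amounts to drawing a uniform random index into the alphabet for each output character; a sketch, assuming the `rand` crate as used here:

use rand::Rng;

// Sketch only; mirrors the elided loop body of get_random_string.
fn sample(alphabet: &[u8], num_chars: usize) -> String {
    let mut rng = rand::thread_rng();
    (0..num_chars)
        .map(|_| alphabet[rng.gen_range(0..alphabet.len())] as char)
        .collect()
}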
-pub fn generate_id<const N: usize>() -> String {
- encode_random_bytes::<N>(HEXLOWER)
-}
-
-pub fn generate_attachment_id() -> String {
- // Attachment IDs are scoped to a cipher, so they can be smaller.
- generate_id::<10>() // 80 bits
-}
-
/// Generates a personal API key.
/// Upstream uses 30 chars, which is ~178 bits of entropy.
pub fn generate_api_key() -> String {
diff --git a/src/db/mod.rs b/src/db/mod.rs
@@ -1,248 +1,149 @@
-use std::{sync::Arc, time::Duration};
-
+use crate::{
+ config::{self, Config},
+ error::{Error, MapResult},
+};
use diesel::{
connection::SimpleConnection,
r2d2::{ConnectionManager, CustomizeConnection, Pool, PooledConnection},
};
-
use rocket::{
http::Status,
request::{FromRequest, Outcome},
Request,
};
-
+use std::{sync::Arc, time::Duration};
use tokio::{
sync::{Mutex, OwnedSemaphorePermit, Semaphore},
time::timeout,
};
-
-use crate::{
- config::{self, Config},
- error::{Error, MapResult},
-};
-
#[path = "schemas/sqlite/schema.rs"]
-pub mod __sqlite_schema;
+mod __sqlite_schema;
// These changes are based on the Rocket 0.5-rc wrapper of Diesel: https://github.com/SergioBenitez/Rocket/blob/v0.5-rc/contrib/sync_db_pools
// A wrapper around spawn_blocking that propagates panics to the calling code.
-pub async fn run_blocking<F, R>(job: F) -> R
+async fn run_blocking<F, R>(job: F) -> R
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
match tokio::task::spawn_blocking(job).await {
Ok(ret) => ret,
- Err(e) => match e.try_into_panic() {
- Ok(panic) => std::panic::resume_unwind(panic),
- Err(_) => unreachable!("spawn_blocking tasks are never cancelled"),
- },
+ Err(e) => e.try_into_panic().map_or_else(
+ |_| unreachable!("spawn_blocking tasks are never cancelled"),
+ |panic| std::panic::resume_unwind(panic),
+ ),
}
}
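A minimal usage sketch: the closure runs on Tokio's blocking pool, yet the caller awaits the result as if it were computed inline, and any panic inside the closure resurfaces at the call site rather than being swallowed:

async fn demo() {
    // Hypothetical call site for illustration.
    let sum = run_blocking(|| (1u32..=10).sum::<u32>()).await;
    assert_eq!(sum, 55);
}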
-
-// This is used to generate the main DbConn and DbPool enums, which contain one variant for each database supported
-macro_rules! generate_connections {
- ( $( $name:ident: $ty:ty ),+ ) => {
- #[allow(non_camel_case_types, dead_code)]
- #[derive(Eq, PartialEq)]
- pub enum DbConnType { $( $name, )+ }
-
- pub struct DbConn {
- conn: Arc<Mutex<Option<DbConnInner>>>,
- permit: Option<OwnedSemaphorePermit>,
- }
-
- #[allow(non_camel_case_types)]
- pub enum DbConnInner { $( #[cfg($name)] $name(PooledConnection<ConnectionManager< $ty >>), )+ }
-
- #[derive(Debug)]
- pub struct DbConnOptions {
- pub init_stmts: String,
- }
-
- $( // Based on <https://stackoverflow.com/a/57717533>.
- #[cfg($name)]
- impl CustomizeConnection<$ty, diesel::r2d2::Error> for DbConnOptions {
- fn on_acquire(&self, conn: &mut $ty) -> Result<(), diesel::r2d2::Error> {
- if !self.init_stmts.is_empty() {
- conn.batch_execute(&self.init_stmts).map_err(diesel::r2d2::Error::QueryError)?;
- }
- Ok(())
- }
- })+
-
- #[derive(Clone)]
- pub struct DbPool {
- // This is an 'Option' so that we can drop the pool in a 'spawn_blocking'.
- pool: Option<DbPoolInner>,
- semaphore: Arc<Semaphore>
- }
-
- #[allow(non_camel_case_types)]
- #[derive(Clone)]
- pub enum DbPoolInner { $( #[cfg($name)] $name(Pool<ConnectionManager< $ty >>), )+ }
-
- impl Drop for DbConn {
- fn drop(&mut self) {
- let conn = Arc::clone(&self.conn);
- let permit = self.permit.take();
-
- // Since connection can't be on the stack in an async fn during an
- // await, we have to spawn a new blocking-safe thread...
- tokio::task::spawn_blocking(move || {
- // And then re-enter the runtime to wait on the async mutex, but in a blocking fashion.
- let mut conn = tokio::runtime::Handle::current().block_on(conn.lock_owned());
-
- if let Some(conn) = conn.take() {
- drop(conn);
- }
-
- // Drop permit after the connection is dropped
- drop(permit);
- });
- }
- }
-
- impl Drop for DbPool {
- fn drop(&mut self) {
- let pool = self.pool.take();
- tokio::task::spawn_blocking(move || drop(pool));
- }
- }
-
- impl DbPool {
- // For the given database URL, guess its type, run migrations, create pool, and return it
- pub fn from_config() -> Result<Self, Error> {
- let url = Config::DATABASE_URL;
- let conn_type = DbConnType::from_url(&url)?;
-
- match conn_type { $(
- DbConnType::$name => {
- #[cfg($name)]
- {
- paste::paste!{ [< $name _migrations >]::run_migrations()?; }
- let manager = ConnectionManager::new(url);
- let pool = Pool::builder()
- .max_size(config::get_config().database_max_conns.get() as u32)
- .connection_timeout(Duration::from_secs(config::get_config().database_timeout as u64))
- .connection_customizer(Box::new(DbConnOptions{
- init_stmts: conn_type.get_init_stmts()
- }))
- .build(manager)
- .map_res("Failed to create pool")?;
- Ok(DbPool {
- pool: Some(DbPoolInner::$name(pool)),
- semaphore: Arc::new(Semaphore::new(config::get_config().database_max_conns.get() as usize)),
- })
- }
- #[cfg(not($name))]
- unreachable!("Trying to use a DB backend when it's feature is disabled")
- },
- )+ }
- }
- // Get a connection from the pool
- pub async fn get(&self) -> Result<DbConn, Error> {
- let duration = Duration::from_secs(config::get_config().database_timeout as u64);
- let permit = match timeout(duration, Arc::clone(&self.semaphore).acquire_owned()).await {
- Ok(p) => p.expect("Semaphore should be open"),
- Err(_) => {
- err!("Timeout waiting for database connection");
- }
- };
-
- match self.pool.as_ref().expect("DbPool.pool should always be Some()") { $(
- #[cfg($name)]
- DbPoolInner::$name(p) => {
- let pool = p.clone();
- let c = run_blocking(move || pool.get_timeout(duration)).await.map_res("Error retrieving connection from pool")?;
-
- Ok(DbConn {
- conn: Arc::new(Mutex::new(Some(DbConnInner::$name(c)))),
- permit: Some(permit)
- })
- },
- )+ }
- }
- }
- };
+pub struct DbConn {
+ conn: Arc<Mutex<Option<PooledConnection<ConnectionManager<diesel::SqliteConnection>>>>>,
+ permit: Option<OwnedSemaphorePermit>,
}
-
-generate_connections! {
- sqlite: diesel::sqlite::SqliteConnection,
- mysql: diesel::mysql::MysqlConnection,
- postgresql: diesel::pg::PgConnection
+#[derive(Debug)]
+struct DbConnOptions;
+impl CustomizeConnection<diesel::SqliteConnection, diesel::r2d2::Error> for DbConnOptions {
+ fn on_acquire(&self, conn: &mut diesel::SqliteConnection) -> Result<(), diesel::r2d2::Error> {
+ conn.batch_execute("PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;")
+ .map_err(diesel::r2d2::Error::QueryError)
+ }
+}
+#[derive(Clone)]
+pub struct DbPool {
+ // This is an 'Option' so that we can drop the pool in a 'spawn_blocking'.
+ pool: Option<Pool<ConnectionManager<diesel::SqliteConnection>>>,
+ semaphore: Arc<Semaphore>,
}
-impl DbConnType {
- pub fn from_url(_: &str) -> Result<DbConnType, Error> {
- Ok(DbConnType::sqlite)
+impl Drop for DbConn {
+ fn drop(&mut self) {
+ let conn = Arc::clone(&self.conn);
+ let permit = self.permit.take();
+ // Since connection can't be on the stack in an async fn during an
+ // await, we have to spawn a new blocking-safe thread...
+ tokio::task::spawn_blocking(move || {
+ // And then re-enter the runtime to wait on the async mutex, but in a blocking fashion.
+ let mut conn = tokio::runtime::Handle::current().block_on(conn.lock_owned());
+ if let Some(conn) = conn.take() {
+ drop(conn);
+ }
+ // Drop permit after the connection is dropped
+ drop(permit);
+ });
}
-
- pub fn get_init_stmts(&self) -> String {
- self.default_init_stmts()
+}
+impl Drop for DbPool {
+ fn drop(&mut self) {
+ let pool = self.pool.take();
+ tokio::task::spawn_blocking(move || drop(pool));
}
-
- pub fn default_init_stmts(&self) -> String {
- "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;".to_string()
+}
+impl DbPool {
+    // For the configured sqlite URL, run migrations, create the pool, and return it
+ #[allow(clippy::cast_lossless)]
+ pub fn from_config() -> Result<Self, Error> {
+ let url = Config::DATABASE_URL;
+ paste::paste! {sqlite_migrations::run_migrations()?; }
+ let manager = ConnectionManager::new(url);
+ let pool = Pool::builder()
+ .max_size(config::get_config().database_max_conns.get() as u32)
+ .connection_timeout(Duration::from_secs(
+ config::get_config().database_timeout as u64,
+ ))
+ .connection_customizer(Box::new(DbConnOptions))
+ .build(manager)
+ .map_res("Failed to create pool")?;
+ Ok(Self {
+ pool: Some(pool),
+ semaphore: Arc::new(Semaphore::new(
+ config::get_config().database_max_conns.get() as usize,
+ )),
+ })
+ }
+ // Get a connection from the pool
+ #[allow(clippy::cast_lossless)]
+ async fn get(&self) -> Result<DbConn, Error> {
+ let duration = Duration::from_secs(config::get_config().database_timeout as u64);
+ let permit = match timeout(duration, Arc::clone(&self.semaphore).acquire_owned()).await {
+ Ok(p) => p.expect("Semaphore should be open"),
+ Err(_) => {
+ err!("Timeout waiting for database connection");
+ }
+ };
+ let pool = self
+ .pool
+ .as_ref()
+ .expect("DbPool.pool should always be Some()")
+ .clone();
+ let c = run_blocking(move || pool.get_timeout(duration))
+ .await
+ .map_res("Error retrieving connection from pool")?;
+ Ok(DbConn {
+ conn: Arc::new(Mutex::new(Some(c))),
+ permit: Some(permit),
+ })
}
}
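Checkout therefore happens in two stages: a semaphore permit bounds concurrency to `database_max_conns`, then the r2d2 checkout runs on the blocking pool. A hedged sketch of a call site:

// Hypothetical; handlers normally obtain DbConn via FromRequest.
async fn demo(pool: &DbPool) -> Result<(), Error> {
    let conn = pool.get().await?; // waits on the semaphore, then r2d2
    drop(conn); // the permit is released only after the connection drops
    Ok(())
}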
-
#[macro_export]
macro_rules! db_run {
- // Same for all dbs
( $conn:ident: $body:block ) => {
- db_run! { $conn: sqlite, mysql, postgresql $body }
- };
-
- ( @raw $conn:ident: $body:block ) => {
- db_run! { @raw $conn: sqlite, mysql, postgresql $body }
- };
-
- // Different code for each db
- ( $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{
- #[allow(unused)] use diesel::prelude::*;
- #[allow(unused)] use $crate::db::FromDb;
-
- let conn = $conn.conn.clone();
- let mut conn = conn.lock_owned().await;
- match conn.as_mut().expect("internal invariant broken: self.connection is Some") {
- $($(
- #[cfg($db)]
- $crate::db::DbConnInner::$db($conn) => {
- paste::paste! {
- #[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *};
- #[allow(unused)] use [<__ $db _model>]::*;
- }
-
- tokio::task::block_in_place(move || { $body }) // Run blocking can't be used due to the 'static limitation, use block_in_place instead
- },
- )+)+
- }
- }};
-
- ( @raw $conn:ident: $( $($db:ident),+ $body:block )+ ) => {{
- #[allow(unused)] use diesel::prelude::*;
- #[allow(unused)] use $crate::db::FromDb;
-
- let conn = $conn.conn.clone();
- let mut conn = conn.lock_owned().await;
- match conn.as_mut().expect("internal invariant broken: self.connection is Some") {
- $($(
- #[cfg($db)]
- $crate::db::DbConnInner::$db($conn) => {
- paste::paste! {
- #[allow(unused)] use $crate::db::[<__ $db _schema>]::{self as schema, *};
- // @ RAW: #[allow(unused)] use [<__ $db _model>]::*;
- }
-
- tokio::task::block_in_place(move || { $body }) // Run blocking can't be used due to the 'static limitation, use block_in_place instead
- },
- )+)+
+ #[allow(unused)]
+ use diesel::prelude::*;
+ #[allow(unused)]
+ use $crate::db::FromDb;
+ let mut con = $conn.conn.clone().lock_owned().await;
+ paste::paste! {
+ #[allow(unused)] use $crate::db::__sqlite_schema::{self as schema, *};
+ #[allow(unused)] use __sqlite_model::*;
}
- }};
+ tokio::task::block_in_place(move || {
+ let $conn = con
+ .as_mut()
+ .expect("internal invariant broken: self.connection is Some");
+ $body
+        }) // run_blocking can't be used due to the 'static limitation; use block_in_place instead
+ };
}
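A hypothetical call site, to make the new single-backend expansion concrete: the macro locks the owned connection, brings the sqlite schema and model modules into scope, and rebinds the identifier to the `&mut` connection inside `block_in_place`:

// Sketch only; `users` comes from the embedded sqlite schema.
async fn count_users(conn: &DbConn) -> i64 {
    db_run! { conn: {
        users::table.count().first::<i64>(conn).unwrap_or(0)
    }}
}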
-pub trait FromDb {
+trait FromDb {
type Output;
#[allow(clippy::wrong_self_convention)]
fn from_db(self) -> Self::Output;
@@ -251,7 +152,7 @@ pub trait FromDb {
impl<T: FromDb> FromDb for Vec<T> {
type Output = Vec<T::Output>;
#[allow(clippy::wrong_self_convention)]
- #[inline(always)]
+ #[inline]
fn from_db(self) -> Self::Output {
self.into_iter().map(crate::db::FromDb::from_db).collect()
}
@@ -260,13 +161,13 @@ impl<T: FromDb> FromDb for Vec<T> {
impl<T: FromDb> FromDb for Option<T> {
type Output = Option<T::Output>;
#[allow(clippy::wrong_self_convention)]
- #[inline(always)]
+ #[inline]
fn from_db(self) -> Self::Output {
self.map(crate::db::FromDb::from_db)
}
}
-// For each struct eg. Cipher, we create a CipherDb inside a module named __$db_model (where $db is sqlite, mysql or postgresql),
+// For each struct, e.g. Cipher, we create a CipherDb inside a module named __sqlite_model,
// to implement the Diesel traits. We also provide methods to convert between them and the basic structs. Later, that module will be auto-imported when using db_run!
#[macro_export]
macro_rules! db_object {
@@ -279,14 +180,14 @@ macro_rules! db_object {
)+ ) => {
// Create the normal struct, without attributes
$( pub struct $name { $( /*$( #[$field_attr] )**/ $vis $field : $typ, )+ } )+
- pub mod __sqlite_model { $( db_object! { @db sqlite | $( #[$attr] )* | $name | $( $( #[$field_attr] )* $field : $typ ),+ } )+ }
+ mod __sqlite_model { $( db_object! { $( #[$attr] )* | $name | $( $( #[$field_attr] )* $field : $typ ),+ } )+ }
};
- ( @db $db:ident | $( #[$attr:meta] )* | $name:ident | $( $( #[$field_attr:meta] )* $vis:vis $field:ident : $typ:ty),+) => {
+ ( $( #[$attr:meta] )* | $name:ident | $( $( #[$field_attr:meta] )* $vis:vis $field:ident : $typ:ty),+) => {
paste::paste! {
#[allow(unused)] use super::*;
#[allow(unused)] use diesel::prelude::*;
- #[allow(unused)] use $crate::db::[<__ $db _schema>]::*;
+ #[allow(unused)] use $crate::db::__sqlite_schema::*;
$( #[$attr] )*
pub struct [<$name Db>] { $(
@@ -294,14 +195,14 @@ macro_rules! db_object {
)+ }
impl [<$name Db>] {
- #[allow(clippy::wrong_self_convention)]
- #[inline(always)] pub fn to_db(x: &super::$name) -> Self { Self { $( $field: x.$field.clone(), )+ } }
+ #[allow(clippy::used_underscore_binding, clippy::wrong_self_convention)]
+ #[inline] pub fn to_db(x: &super::$name) -> Self { Self { $( $field: x.$field.clone(), )+ } }
}
impl $crate::db::FromDb for [<$name Db>] {
type Output = super::$name;
- #[allow(clippy::wrong_self_convention)]
- #[inline(always)] fn from_db(self) -> Self::Output { super::$name { $( $field: self.$field, )+ } }
+ #[allow(clippy::used_underscore_binding, clippy::wrong_self_convention)]
+ #[inline] fn from_db(self) -> Self::Output { super::$name { $( $field: self.$field, )+ } }
}
}
};
@@ -318,26 +219,24 @@ impl<'r> FromRequest<'r> for DbConn {
async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
match request.rocket().state::<DbPool>() {
- Some(p) => match p.get().await {
- Ok(dbconn) => Outcome::Success(dbconn),
- _ => Outcome::Error((Status::ServiceUnavailable, ())),
- },
+ Some(p) => (p.get().await).map_or_else(
+ |_| Outcome::Error((Status::ServiceUnavailable, ())),
+ Outcome::Success,
+ ),
None => Outcome::Error((Status::InternalServerError, ())),
}
}
}
mod sqlite_migrations {
use diesel_migrations::{EmbeddedMigrations, MigrationHarness};
- pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/sqlite");
+ const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/sqlite");
pub fn run_migrations() -> Result<(), super::Error> {
use diesel::{Connection, RunQueryDsl};
let url = crate::Config::DATABASE_URL;
-
        // Establish a connection to the sqlite database (this will create a new one if it does
        // not exist, and exit if there is an error).
let mut connection = diesel::sqlite::SqliteConnection::establish(url)?;
-
// Run the migrations after successfully establishing a connection
// Disable Foreign Key Checks during migration
// Scoped to a connection.
@@ -348,7 +247,6 @@ mod sqlite_migrations {
diesel::sql_query("PRAGMA journal_mode=wal")
.execute(&mut connection)
.expect("Failed to turn on WAL");
-
connection
.run_pending_migrations(MIGRATIONS)
.expect("Error running migrations");
diff --git a/src/db/models/attachment.rs b/src/db/models/attachment.rs
@@ -1,236 +0,0 @@
-use crate::config::Config;
-use serde_json::Value;
-use std::fs;
-use std::io::ErrorKind;
-use std::path::Path;
-
-db_object! {
- #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
- #[diesel(table_name = attachments)]
- #[diesel(treat_none_as_null = true)]
- #[diesel(primary_key(id))]
- pub struct Attachment {
- pub id: String,
- pub cipher_uuid: String,
- pub file_name: String, // encrypted
- pub file_size: i32,
- pub akey: Option<String>,
- }
-}
-
-/// Local methods
-impl Attachment {
- pub const fn new(
- id: String,
- cipher_uuid: String,
- file_name: String,
- file_size: i32,
- akey: Option<String>,
- ) -> Self {
- Self {
- id,
- cipher_uuid,
- file_name,
- file_size,
- akey,
- }
- }
-
- pub fn get_file_path(&self) -> String {
- format!(
- "{}/{}/{}",
- Config::ATTACHMENTS_FOLDER,
- self.cipher_uuid,
- self.id
- )
- }
-
- pub fn get_url(&self, host: &str) -> String {
- let token = encode_jwt(&generate_file_download_claims(
- self.cipher_uuid.clone(),
- self.id.clone(),
- ));
- format!(
- "{}/attachments/{}/{}?token={}",
- host, self.cipher_uuid, self.id, token
- )
- }
-
- pub fn to_json(&self, host: &str) -> Value {
- json!({
- "Id": self.id,
- "Url": self.get_url(host),
- "FileName": self.file_name,
- "Size": self.file_size.to_string(),
- "SizeName": crate::util::get_display_size(self.file_size),
- "Key": self.akey,
- "Object": "attachment"
- })
- }
-}
-
-use crate::auth::{encode_jwt, generate_file_download_claims};
-use crate::db::DbConn;
-
-use crate::api::EmptyResult;
-use crate::error::MapResult;
-
-/// Database methods
-impl Attachment {
- pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
- db_run! { conn:
- sqlite, mysql {
- match diesel::replace_into(attachments::table)
- .values(AttachmentDb::to_db(self))
- .execute(conn)
- {
- Ok(_) => Ok(()),
- // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
- Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
- diesel::update(attachments::table)
- .filter(attachments::id.eq(&self.id))
- .set(AttachmentDb::to_db(self))
- .execute(conn)
- .map_res("Error saving attachment")
- }
- Err(e) => Err(e.into()),
- }.map_res("Error saving attachment")
- }
- postgresql {
- let value = AttachmentDb::to_db(self);
- diesel::insert_into(attachments::table)
- .values(&value)
- .on_conflict(attachments::id)
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error saving attachment")
- }
- }
- }
-
- pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
- db_run! { conn: {
- crate::util::retry(
- || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
- 10,
- )
- .map_res("Error deleting attachment")?;
- let file_path = &self.get_file_path();
- let path = Path::new(&file_path);
- if let Err(err) = fs::remove_file(path) {
- if err.kind() != ErrorKind::NotFound {
- return Err(err.into())
- }
- }
- match path.parent() {
- None => Ok(()),
- Some(parent) => fs::remove_dir(parent).or_else(|err| match fs::read_dir(parent) {
- Err(err2) => if err2.kind() == ErrorKind::NotFound {
- Ok(())
- } else {
- Err(err2.into())
- },
- Ok(dir) => if dir.count() == 0 {
- Err(err.into())
- } else {
- Ok(())
- }
- })
- }
- }}
- }
-
- pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
- for attachment in Attachment::find_by_cipher(cipher_uuid, conn).await {
- attachment.delete(conn).await?;
- }
- Ok(())
- }
-
- pub async fn find_by_id(id: &str, conn: &mut DbConn) -> Option<Self> {
- db_run! { conn: {
- attachments::table
- .filter(attachments::id.eq(id.to_lowercase()))
- .first::<AttachmentDb>(conn)
- .ok()
- .from_db()
- }}
- }
-
- pub async fn find_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- attachments::table
- .filter(attachments::cipher_uuid.eq(cipher_uuid))
- .load::<AttachmentDb>(conn)
- .expect("Error loading attachments")
- .from_db()
- }}
- }
-
- pub async fn size_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
- db_run! { conn: {
- let result: Option<i64> = attachments::table
- .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
- .filter(ciphers::user_uuid.eq(user_uuid))
- .select(diesel::dsl::sum(attachments::file_size))
- .first(conn)
- .expect("Error loading user attachment total size");
- result.unwrap_or(0)
- }}
- }
-
- pub async fn count_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
- db_run! { conn: {
- attachments::table
- .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
- .filter(ciphers::user_uuid.eq(user_uuid))
- .count()
- .first(conn)
- .unwrap_or(0)
- }}
- }
-
- pub async fn size_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
- db_run! { conn: {
- let result: Option<i64> = attachments::table
- .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
- .filter(ciphers::organization_uuid.eq(org_uuid))
- .select(diesel::dsl::sum(attachments::file_size))
- .first(conn)
- .expect("Error loading user attachment total size");
- result.unwrap_or(0)
- }}
- }
-
- pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
- db_run! { conn: {
- attachments::table
- .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
- .filter(ciphers::organization_uuid.eq(org_uuid))
- .count()
- .first(conn)
- .unwrap_or(0)
- }}
- }
-
- // This will return all attachments linked to the user or org
- // There is no filtering done here if the user actually has access!
- // It is used to speed up the sync process, and the matching is done in a different part.
- pub async fn find_all_by_user_and_orgs(
- user_uuid: &str,
- org_uuids: &Vec<String>,
- conn: &mut DbConn,
- ) -> Vec<Self> {
- db_run! { conn: {
- attachments::table
- .left_join(ciphers::table.on(ciphers::uuid.eq(attachments::cipher_uuid)))
- .filter(ciphers::user_uuid.eq(user_uuid))
- .or_filter(ciphers::organization_uuid.eq_any(org_uuids))
- .select(attachments::all_columns)
- .load::<AttachmentDb>(conn)
- .expect("Error loading attachments")
- .from_db()
- }}
- }
-}
diff --git a/src/db/models/auth_request.rs b/src/db/models/auth_request.rs
@@ -9,25 +9,19 @@ db_object! {
pub struct AuthRequest {
pub uuid: String,
pub user_uuid: String,
- pub organization_uuid: Option<String>,
-
- pub request_device_identifier: String,
+ organization_uuid: Option<String>,
+ request_device_identifier: String,
pub device_type: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs
-
pub request_ip: String,
pub response_device_id: Option<String>,
-
- pub access_code: String,
+ access_code: String,
pub public_key: String,
-
pub enc_key: Option<String>,
-
pub master_password_hash: Option<String>,
pub approved: Option<bool>,
pub creation_date: NaiveDateTime,
pub response_date: Option<NaiveDateTime>,
-
- pub authentication_date: Option<NaiveDateTime>,
+ authentication_date: Option<NaiveDateTime>,
}
}
@@ -69,9 +63,9 @@ use crate::api::EmptyResult;
use crate::error::MapResult;
impl AuthRequest {
- pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
db_run! { conn:
- sqlite, mysql {
+ {
match diesel::replace_into(auth_requests::table)
.values(AuthRequestDb::to_db(self))
.execute(conn)
@@ -88,20 +82,10 @@ impl AuthRequest {
Err(e) => Err(e.into()),
}.map_res("Error auth_request")
}
- postgresql {
- let value = AuthRequestDb::to_db(self);
- diesel::insert_into(auth_requests::table)
- .values(&value)
- .on_conflict(auth_requests::uuid)
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error saving auth_request")
- }
}
}
- pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+ pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! {conn: {
auth_requests::table
.filter(auth_requests::uuid.eq(uuid))
@@ -111,38 +95,14 @@ impl AuthRequest {
}}
}
- pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! {conn: {
auth_requests::table
.filter(auth_requests::user_uuid.eq(user_uuid))
.load::<AuthRequestDb>(conn).expect("Error loading auth_requests").from_db()
}}
}
-
- pub async fn find_created_before(dt: &NaiveDateTime, conn: &mut DbConn) -> Vec<Self> {
- db_run! {conn: {
- auth_requests::table
- .filter(auth_requests::creation_date.lt(dt))
- .load::<AuthRequestDb>(conn).expect("Error loading auth_requests").from_db()
- }}
- }
-
- pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
- db_run! { conn: {
- diesel::delete(auth_requests::table.filter(auth_requests::uuid.eq(&self.uuid)))
- .execute(conn)
- .map_res("Error deleting auth request")
- }}
- }
-
pub fn check_access_code(&self, access_code: &str) -> bool {
ct_eq(&self.access_code, access_code)
}
-
- pub async fn purge_expired_auth_requests(conn: &mut DbConn) {
- let expiry_time = Utc::now().naive_utc() - chrono::Duration::minutes(5); //after 5 minutes, clients reject the request
- for auth_request in Self::find_created_before(&expiry_time, conn).await {
- auth_request.delete(conn).await.ok();
- }
- }
}
diff --git a/src/db/models/cipher.rs b/src/db/models/cipher.rs
@@ -1,13 +1,9 @@
-use chrono::{NaiveDateTime, Utc};
-use serde_json::Value;
-
use super::{
- Attachment, CollectionCipher, Favorite, FolderCipher, Group, User, UserOrgStatus, UserOrgType,
- UserOrganization,
+ CollectionCipher, Favorite, FolderCipher, User, UserOrgStatus, UserOrgType, UserOrganization,
};
-
use crate::api::core::{CipherData, CipherSyncData, CipherSyncType};
-
+use chrono::{NaiveDateTime, Utc};
+use serde_json::Value;
use std::borrow::Cow;
db_object! {
@@ -17,27 +13,16 @@ db_object! {
#[diesel(primary_key(uuid))]
pub struct Cipher {
pub uuid: String,
- pub created_at: NaiveDateTime,
+ created_at: NaiveDateTime,
pub updated_at: NaiveDateTime,
-
pub user_uuid: Option<String>,
pub organization_uuid: Option<String>,
-
pub key: Option<String>,
-
- /*
- Login = 1,
- SecureNote = 2,
- Card = 3,
- Identity = 4
- */
- pub atype: i32,
+ atype: i32,
pub name: String,
pub notes: Option<String>,
pub fields: Option<String>,
-
pub data: String,
-
pub password_history: Option<String>,
pub deleted_at: Option<NaiveDateTime>,
pub reprompt: Option<i32>,
@@ -45,7 +30,7 @@ db_object! {
}
#[allow(dead_code)]
-pub enum RepromptType {
+enum RepromptType {
None = 0,
Password = 1, // not currently used in server
}
@@ -100,41 +85,23 @@ impl Cipher {
"object": "error"
});
err_json!(err_json, "Import validation errors")
- } else {
- Ok(())
}
+ Ok(())
}
}
-
-use crate::db::DbConn;
-
use crate::api::EmptyResult;
+use crate::db::DbConn;
use crate::error::MapResult;
-
/// Database methods
impl Cipher {
pub async fn to_json(
&self,
- host: &str,
user_uuid: &str,
cipher_sync_data: Option<&CipherSyncData>,
sync_type: CipherSyncType,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Value {
use crate::util::format_date;
-
- let mut attachments_json: Value = Value::Null;
- if let Some(cipher_sync_data) = cipher_sync_data {
- if let Some(attachments) = cipher_sync_data.cipher_attachments.get(&self.uuid) {
- attachments_json = attachments.iter().map(|c| c.to_json(host)).collect();
- }
- } else {
- let attachments = Attachment::find_by_cipher(&self.uuid, conn).await;
- if !attachments.is_empty() {
- attachments_json = attachments.iter().map(|c| c.to_json(host)).collect()
- }
- }
-
let fields_json = self
.fields
.as_ref()
@@ -186,13 +153,11 @@ impl Cipher {
data_json["Name"] = json!(self.name);
data_json["Notes"] = json!(self.notes);
data_json["PasswordHistory"] = password_history_json.clone();
-
let collection_ids = if let Some(cipher_sync_data) = cipher_sync_data {
- if let Some(cipher_collections) = cipher_sync_data.cipher_collections.get(&self.uuid) {
- Cow::from(cipher_collections)
- } else {
- Cow::from(Vec::with_capacity(0))
- }
+ cipher_sync_data
+ .cipher_collections
+ .get(&self.uuid)
+ .map_or_else(|| Cow::from(Vec::with_capacity(0)), Cow::from)
} else {
Cow::from(self.get_collections(user_uuid.to_string(), conn).await)
};
@@ -214,7 +179,7 @@ impl Cipher {
"Reprompt": self.reprompt.unwrap_or(RepromptType::None as i32),
"OrganizationId": self.organization_uuid,
"Key": self.key,
- "Attachments": attachments_json,
+ "Attachments": Value::Null,
// We have UseTotp set to true by default within the Organization model.
// This variable together with UsersGetPremium is used to show or hide the TOTP counter.
"OrganizationUseTotp": true,
@@ -245,7 +210,7 @@ impl Cipher {
cipher_sync_data
.cipher_folders
.get(&self.uuid)
- .map(|c| c.to_string())
+ .map(std::string::ToString::to_string)
} else {
self.get_folder_uuid(user_uuid, conn).await
});
@@ -273,23 +238,21 @@ impl Cipher {
json_object
}
- pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<String> {
+ pub async fn update_users_revision(&self, conn: &DbConn) -> Vec<String> {
let mut user_uuids = Vec::new();
match self.user_uuid {
Some(ref user_uuid) => {
User::update_uuid_revision(user_uuid, conn).await;
- user_uuids.push(user_uuid.clone())
+ user_uuids.push(user_uuid.clone());
}
None => {
// Belongs to Organization, need to update affected users
if let Some(ref org_uuid) = self.organization_uuid {
for user_org in
- UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn)
- .await
- .iter()
+ &UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await
{
User::update_uuid_revision(&user_org.user_uuid, conn).await;
- user_uuids.push(user_org.user_uuid.clone())
+ user_uuids.push(user_org.user_uuid.clone());
}
}
}
@@ -297,12 +260,12 @@ impl Cipher {
user_uuids
}
- pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
self.update_users_revision(conn).await;
self.updated_at = Utc::now().naive_utc();
db_run! { conn:
- sqlite, mysql {
+ {
match diesel::replace_into(ciphers::table)
.values(CipherDb::to_db(self))
.execute(conn)
@@ -319,25 +282,14 @@ impl Cipher {
Err(e) => Err(e.into()),
}.map_res("Error saving cipher")
}
- postgresql {
- let value = CipherDb::to_db(self);
- diesel::insert_into(ciphers::table)
- .values(&value)
- .on_conflict(ciphers::uuid)
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error saving cipher")
- }
}
}
- pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete(&self, conn: &DbConn) -> EmptyResult {
self.update_users_revision(conn).await;
FolderCipher::delete_all_by_cipher(&self.uuid, conn).await?;
CollectionCipher::delete_all_by_cipher(&self.uuid, conn).await?;
- Attachment::delete_all_by_cipher(&self.uuid, conn).await?;
Favorite::delete_all_by_cipher(&self.uuid, conn).await?;
db_run! { conn: {
@@ -347,7 +299,7 @@ impl Cipher {
}}
}
- pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
// TODO: Optimize this by executing a DELETE directly on the database, instead of first fetching.
for cipher in Self::find_by_org(org_uuid, conn).await {
cipher.delete(conn).await?;
@@ -355,7 +307,7 @@ impl Cipher {
Ok(())
}
- pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
for cipher in Self::find_owned_by_user(user_uuid, conn).await {
cipher.delete(conn).await?;
}
@@ -366,7 +318,7 @@ impl Cipher {
&self,
folder_uuid: Option<String>,
user_uuid: &str,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> EmptyResult {
User::update_uuid_revision(user_uuid, conn).await;
@@ -399,7 +351,7 @@ impl Cipher {
}
/// Returns whether this cipher is directly owned by the user.
- pub fn is_owned_by_user(&self, user_uuid: &str) -> bool {
+ fn is_owned_by_user(&self, user_uuid: &str) -> bool {
self.user_uuid.is_some() && self.user_uuid.as_ref().unwrap() == user_uuid
}
@@ -408,7 +360,7 @@ impl Cipher {
&self,
user_uuid: &str,
cipher_sync_data: Option<&CipherSyncData>,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> bool {
if let Some(ref org_uuid) = self.organization_uuid {
if let Some(cipher_sync_data) = cipher_sync_data {
@@ -423,37 +375,16 @@ impl Cipher {
}
false
}
-
- /// Returns whether this cipher is owned by a group in which the user has full access.
- async fn is_in_full_access_group(
- &self,
- user_uuid: &str,
- cipher_sync_data: Option<&CipherSyncData>,
- conn: &mut DbConn,
- ) -> bool {
- if let Some(ref org_uuid) = self.organization_uuid {
- if let Some(cipher_sync_data) = cipher_sync_data {
- return cipher_sync_data
- .user_group_full_access_for_organizations
- .get(org_uuid)
- .is_some();
- } else {
- return Group::is_in_full_access_group(user_uuid, org_uuid, conn).await;
- }
- }
- false
- }
-
/// Returns the user's access restrictions to this cipher. A return value
/// of None means that this cipher does not belong to the user, and is
/// not in any collection the user has access to. Otherwise, the user has
/// access to this cipher, and Some(read_only, hide_passwords) represents
/// the access restrictions.
- pub async fn get_access_restrictions(
+ async fn get_access_restrictions(
&self,
user_uuid: &str,
cipher_sync_data: Option<&CipherSyncData>,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Option<(bool, bool)> {
// Check whether this cipher is directly owned by the user, or is in
// a collection that the user has full access to. If so, there are no
@@ -462,9 +393,6 @@ impl Cipher {
|| self
.is_in_full_access_org(user_uuid, cipher_sync_data, conn)
.await
- || self
- .is_in_full_access_group(user_uuid, cipher_sync_data, conn)
- .await
{
return Some((false, false));
}
@@ -477,24 +405,12 @@ impl Cipher {
if let Some(uc) = cipher_sync_data.user_collections.get(collection) {
rows.push((uc.read_only, uc.hide_passwords));
}
-
- //Group permissions
- if let Some(cg) = cipher_sync_data.user_collections_groups.get(collection) {
- rows.push((cg.read_only, cg.hide_passwords));
- }
}
}
rows
} else {
- let mut access_flags = self
- .get_user_collections_access_flags(user_uuid, conn)
- .await;
- access_flags.append(
- &mut self
- .get_group_collections_access_flags(user_uuid, conn)
- .await,
- );
- access_flags
+ self.get_user_collections_access_flags(user_uuid, conn)
+ .await
};
if rows.is_empty() {
@@ -513,7 +429,7 @@ impl Cipher {
// booleans and this behavior isn't portable anyway.
let mut read_only = true;
let mut hide_passwords = true;
- for (ro, hp) in rows.iter() {
+ for (ro, hp) in &rows {
read_only &= ro;
hide_passwords &= hp;
}
@@ -524,7 +440,7 @@ impl Cipher {
async fn get_user_collections_access_flags(
&self,
user_uuid: &str,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Vec<(bool, bool)> {
db_run! {conn: {
// Check whether this cipher is in any collections accessible to the
@@ -542,48 +458,21 @@ impl Cipher {
}}
}
- async fn get_group_collections_access_flags(
- &self,
- user_uuid: &str,
- conn: &mut DbConn,
- ) -> Vec<(bool, bool)> {
- db_run! {conn: {
- ciphers::table
- .filter(ciphers::uuid.eq(&self.uuid))
- .inner_join(ciphers_collections::table.on(
- ciphers::uuid.eq(ciphers_collections::cipher_uuid)
- ))
- .inner_join(collections_groups::table.on(
- collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid)
- ))
- .inner_join(groups_users::table.on(
- groups_users::groups_uuid.eq(collections_groups::groups_uuid)
- ))
- .inner_join(users_organizations::table.on(
- users_organizations::uuid.eq(groups_users::users_organizations_uuid)
- ))
- .filter(users_organizations::user_uuid.eq(user_uuid))
- .select((collections_groups::read_only, collections_groups::hide_passwords))
- .load::<(bool, bool)>(conn)
- .expect("Error getting group access restrictions")
- }}
- }
-
- pub async fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
+ pub async fn is_write_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
match self.get_access_restrictions(user_uuid, None, conn).await {
Some((read_only, _hide_passwords)) => !read_only,
None => false,
}
}
- pub async fn is_accessible_to_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
+ pub async fn is_accessible_to_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
self.get_access_restrictions(user_uuid, None, conn)
.await
.is_some()
}
// Returns whether this cipher is a favorite of the specified user.
- pub async fn is_favorite(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
+ async fn is_favorite(&self, user_uuid: &str, conn: &DbConn) -> bool {
Favorite::is_favorite(&self.uuid, user_uuid, conn).await
}
@@ -592,7 +481,7 @@ impl Cipher {
&self,
favorite: Option<bool>,
user_uuid: &str,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> EmptyResult {
match favorite {
None => Ok(()), // No change requested.
@@ -600,7 +489,7 @@ impl Cipher {
}
}
- pub async fn get_folder_uuid(&self, user_uuid: &str, conn: &mut DbConn) -> Option<String> {
+ async fn get_folder_uuid(&self, user_uuid: &str, conn: &DbConn) -> Option<String> {
db_run! {conn: {
folders_ciphers::table
.inner_join(folders::table)
@@ -612,7 +501,7 @@ impl Cipher {
}}
}
- pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+ pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! {conn: {
ciphers::table
.filter(ciphers::uuid.eq(uuid))
@@ -634,7 +523,7 @@ impl Cipher {
// true, then the non-interesting ciphers will not be returned. As a
// result, those ciphers will not appear in "My Vault" for the org
// owner/admin, but they can still be accessed via the org vault view.
- pub async fn find_by_user(user_uuid: &str, visible_only: bool, conn: &mut DbConn) -> Vec<Self> {
+ async fn find_by_user(user_uuid: &str, visible_only: bool, conn: &DbConn) -> Vec<Self> {
db_run! {conn: {
let mut query = ciphers::table
.left_join(ciphers_collections::table.on(
@@ -650,22 +539,9 @@ impl Cipher {
// Ensure that users_collections::user_uuid is NULL for unconfirmed users.
.and(users_organizations::user_uuid.eq(users_collections::user_uuid))
))
- .left_join(groups_users::table.on(
- groups_users::users_organizations_uuid.eq(users_organizations::uuid)
- ))
- .left_join(groups::table.on(
- groups::uuid.eq(groups_users::groups_uuid)
- ))
- .left_join(collections_groups::table.on(
- collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid).and(
- collections_groups::groups_uuid.eq(groups::uuid)
- )
- ))
.filter(ciphers::user_uuid.eq(user_uuid)) // Cipher owner
.or_filter(users_organizations::access_all.eq(true)) // access_all in org
.or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection
- .or_filter(groups::access_all.eq(true)) // Access via groups
- .or_filter(collections_groups::collections_uuid.is_not_null()) // Access via groups
.into_boxed();
if !visible_only {
@@ -682,12 +558,12 @@ impl Cipher {
}
// Find all ciphers visible to the specified user.
- pub async fn find_by_user_visible(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_user_visible(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
Self::find_by_user(user_uuid, true, conn).await
}
// Find all ciphers directly owned by the specified user.
- pub async fn find_owned_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_owned_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! {conn: {
ciphers::table
.filter(
@@ -698,7 +574,7 @@ impl Cipher {
}}
}
- pub async fn count_owned_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
+ pub async fn count_owned_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
db_run! {conn: {
ciphers::table
.filter(ciphers::user_uuid.eq(user_uuid))
@@ -709,7 +585,7 @@ impl Cipher {
}}
}
- pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! {conn: {
ciphers::table
.filter(ciphers::organization_uuid.eq(org_uuid))
@@ -717,7 +593,7 @@ impl Cipher {
}}
}
- pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
+ pub async fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
db_run! {conn: {
ciphers::table
.filter(ciphers::organization_uuid.eq(org_uuid))
@@ -728,7 +604,7 @@ impl Cipher {
}}
}
- pub async fn find_by_folder(folder_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_folder(folder_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! {conn: {
folders_ciphers::table.inner_join(ciphers::table)
.filter(folders_ciphers::folder_uuid.eq(folder_uuid))
@@ -738,7 +614,7 @@ impl Cipher {
}
/// Find all ciphers that were deleted before the specified datetime.
- pub async fn find_deleted_before(dt: &NaiveDateTime, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_deleted_before(dt: &NaiveDateTime, conn: &DbConn) -> Vec<Self> {
db_run! {conn: {
ciphers::table
.filter(ciphers::deleted_at.lt(dt))
@@ -746,7 +622,7 @@ impl Cipher {
}}
}
- pub async fn get_collections(&self, user_id: String, conn: &mut DbConn) -> Vec<String> {
+ pub async fn get_collections(&self, user_id: String, conn: &DbConn) -> Vec<String> {
db_run! {conn: {
ciphers_collections::table
.inner_join(collections::table.on(
@@ -777,7 +653,7 @@ impl Cipher {
/// This is used during a full sync so we only need one query for all collections accessible.
pub async fn get_collections_with_cipher_by_user(
user_id: String,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Vec<(String, String)> {
db_run! {conn: {
ciphers_collections::table
@@ -794,22 +670,9 @@ impl Cipher {
users_collections::user_uuid.eq(user_id.clone())
)
))
- .left_join(groups_users::table.on(
- groups_users::users_organizations_uuid.eq(users_organizations::uuid)
- ))
- .left_join(groups::table.on(
- groups::uuid.eq(groups_users::groups_uuid)
- ))
- .left_join(collections_groups::table.on(
- collections_groups::collections_uuid.eq(ciphers_collections::collection_uuid).and(
- collections_groups::groups_uuid.eq(groups::uuid)
- )
- ))
.or_filter(users_collections::user_uuid.eq(user_id)) // User has access to collection
.or_filter(users_organizations::access_all.eq(true)) // User has access all
.or_filter(users_organizations::atype.le(UserOrgType::Admin as i32)) // User is admin or owner
- .or_filter(groups::access_all.eq(true)) //Access via group
- .or_filter(collections_groups::collections_uuid.is_not_null()) //Access via group
.select(ciphers_collections::all_columns)
.distinct()
.load::<(String, String)>(conn).unwrap_or_default()
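[Note] The collection_ids rewrite above leans on Cow to avoid cloning when the per-sync cache already holds the list. A self-contained sketch of that borrow-or-own shape, with illustrative names standing in for CipherSyncData and the database query:

use std::borrow::Cow;
use std::collections::HashMap;

fn collection_ids<'a>(
    cache: Option<&'a HashMap<String, Vec<String>>>,
    uuid: &str,
    query: impl FnOnce() -> Vec<String>, // stand-in for the DB lookup
) -> Cow<'a, Vec<String>> {
    match cache {
        // Cache hit: borrow the cached Vec, allocating nothing.
        Some(map) => map
            .get(uuid)
            .map_or_else(|| Cow::Owned(Vec::new()), Cow::Borrowed),
        // No cache: fall back to an owned result from the query.
        None => Cow::Owned(query()),
    }
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert("c1".to_string(), vec!["col-a".to_string()]);
    let ids = collection_ids(Some(&cache), "c1", Vec::new);
    assert!(matches!(ids, Cow::Borrowed(_)));
}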
diff --git a/src/db/models/collection.rs b/src/db/models/collection.rs
@@ -1,7 +1,6 @@
+use super::{User, UserOrgStatus, UserOrgType, UserOrganization};
use serde_json::Value;
-use super::{CollectionGroup, User, UserOrgStatus, UserOrgType, UserOrganization};
-
db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = collections)]
@@ -27,8 +26,8 @@ db_object! {
#[diesel(table_name = ciphers_collections)]
#[diesel(primary_key(cipher_uuid, collection_uuid))]
pub struct CollectionCipher {
- pub cipher_uuid: String,
- pub collection_uuid: String,
+ cipher_uuid: String,
+ collection_uuid: String,
}
}
@@ -56,7 +55,7 @@ impl Collection {
})
}
- pub fn set_external_id(&mut self, external_id: Option<String>) {
+ fn set_external_id(&mut self, external_id: Option<String>) {
//Check if external id is empty. We don't want to have
//empty strings in the database
match external_id {
@@ -64,7 +63,7 @@ impl Collection {
if external_id.is_empty() {
self.external_id = None;
} else {
- self.external_id = Some(external_id)
+ self.external_id = Some(external_id);
}
}
None => self.external_id = None,
@@ -75,24 +74,22 @@ impl Collection {
&self,
user_uuid: &str,
cipher_sync_data: Option<&crate::api::core::CipherSyncData>,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Value {
let (read_only, hide_passwords) = if let Some(cipher_sync_data) = cipher_sync_data {
match cipher_sync_data.user_organizations.get(&self.org_uuid) {
Some(uo) if uo.has_full_access() => (false, false),
- Some(_) => {
- if let Some(uc) = cipher_sync_data.user_collections.get(&self.uuid) {
- (uc.read_only, uc.hide_passwords)
- } else if let Some(cg) = cipher_sync_data.user_collections_groups.get(&self.uuid) {
- (cg.read_only, cg.hide_passwords)
- } else {
- (false, false)
- }
- }
+ Some(_) => cipher_sync_data
+ .user_collections
+ .get(&self.uuid)
+ .map_or((false, false), |uc| (uc.read_only, uc.hide_passwords)),
_ => (true, true),
}
} else {
- (!self.is_writable_by_user(user_uuid, conn).await, self.hide_passwords_for_user(user_uuid, conn).await)
+ (
+ !self.is_writable_by_user(user_uuid, conn).await,
+ self.hide_passwords_for_user(user_uuid, conn).await,
+ )
};
let mut json_object = self.to_json();
@@ -110,11 +107,11 @@ use crate::error::MapResult;
/// Database methods
impl Collection {
- pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn save(&self, conn: &DbConn) -> EmptyResult {
self.update_users_revision(conn).await;
db_run! { conn:
- sqlite, mysql {
+ {
match diesel::replace_into(collections::table)
.values(CollectionDb::to_db(self))
.execute(conn)
@@ -131,24 +128,13 @@ impl Collection {
Err(e) => Err(e.into()),
}.map_res("Error saving collection")
}
- postgresql {
- let value = CollectionDb::to_db(self);
- diesel::insert_into(collections::table)
- .values(&value)
- .on_conflict(collections::uuid)
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error saving collection")
- }
}
}
- pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete(self, conn: &DbConn) -> EmptyResult {
self.update_users_revision(conn).await;
CollectionCipher::delete_all_by_collection(&self.uuid, conn).await?;
CollectionUser::delete_all_by_collection(&self.uuid, conn).await?;
- CollectionGroup::delete_all_by_collection(&self.uuid, conn).await?;
db_run! { conn: {
diesel::delete(collections::table.filter(collections::uuid.eq(self.uuid)))
@@ -157,20 +143,22 @@ impl Collection {
}}
}
- pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
for collection in Self::find_by_organization(org_uuid, conn).await {
collection.delete(conn).await?;
}
Ok(())
}
- pub async fn update_users_revision(&self, conn: &mut DbConn) {
- for user_org in UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await.iter() {
+ async fn update_users_revision(&self, conn: &DbConn) {
+ for user_org in
+ &UserOrganization::find_by_collection_and_org(&self.uuid, &self.org_uuid, conn).await
+ {
User::update_uuid_revision(&user_org.user_uuid, conn).await;
}
}
- pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+ pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
collections::table
.filter(collections::uuid.eq(uuid))
@@ -180,7 +168,7 @@ impl Collection {
}}
}
- pub async fn find_by_user_uuid(user_uuid: String, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_user_uuid(user_uuid: String, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
collections::table
.left_join(users_collections::table.on(
@@ -193,29 +181,12 @@ impl Collection {
users_organizations::user_uuid.eq(user_uuid.clone())
)
))
- .left_join(groups_users::table.on(
- groups_users::users_organizations_uuid.eq(users_organizations::uuid)
- ))
- .left_join(groups::table.on(
- groups::uuid.eq(groups_users::groups_uuid)
- ))
- .left_join(collections_groups::table.on(
- collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
- collections_groups::collections_uuid.eq(collections::uuid)
- )
- ))
.filter(
users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
)
.filter(
users_collections::user_uuid.eq(user_uuid).or( // Directly accessed collection
users_organizations::access_all.eq(true) // access_all in Organization
- ).or(
- groups::access_all.eq(true) // access_all in groups
- ).or( // access via groups
- groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
- collections_groups::collections_uuid.is_not_null()
- )
)
)
.select(collections::all_columns)
@@ -230,12 +201,19 @@ impl Collection {
pub async fn has_access_by_collection_and_user_uuid(
collection_uuid: &str,
user_uuid: &str,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> bool {
- Self::find_by_user_uuid(user_uuid.to_owned(), conn).await.into_iter().any(|c| c.uuid == collection_uuid)
+ Self::find_by_user_uuid(user_uuid.to_owned(), conn)
+ .await
+ .into_iter()
+ .any(|c| c.uuid == collection_uuid)
}
- pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_organization_and_user_uuid(
+ org_uuid: &str,
+ user_uuid: &str,
+ conn: &DbConn,
+ ) -> Vec<Self> {
Self::find_by_user_uuid(user_uuid.to_owned(), conn)
.await
.into_iter()
@@ -243,7 +221,7 @@ impl Collection {
.collect()
}
- pub async fn find_by_organization(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_organization(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
collections::table
.filter(collections::org_uuid.eq(org_uuid))
@@ -253,7 +231,7 @@ impl Collection {
}}
}
- pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
+ pub async fn count_by_org(org_uuid: &str, conn: &DbConn) -> i64 {
db_run! { conn: {
collections::table
.filter(collections::org_uuid.eq(org_uuid))
@@ -264,7 +242,7 @@ impl Collection {
}}
}
- pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> {
+ pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
collections::table
.filter(collections::uuid.eq(uuid))
@@ -276,7 +254,11 @@ impl Collection {
}}
}
- pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: String, conn: &mut DbConn) -> Option<Self> {
+ pub async fn find_by_uuid_and_user(
+ uuid: &str,
+ user_uuid: String,
+ conn: &DbConn,
+ ) -> Option<Self> {
db_run! { conn: {
collections::table
.left_join(users_collections::table.on(
@@ -289,36 +271,18 @@ impl Collection {
users_organizations::user_uuid.eq(user_uuid)
)
))
- .left_join(groups_users::table.on(
- groups_users::users_organizations_uuid.eq(users_organizations::uuid)
- ))
- .left_join(groups::table.on(
- groups::uuid.eq(groups_users::groups_uuid)
- ))
- .left_join(collections_groups::table.on(
- collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
- collections_groups::collections_uuid.eq(collections::uuid)
- )
- ))
.filter(collections::uuid.eq(uuid))
.filter(
users_collections::collection_uuid.eq(uuid).or( // Directly accessed collection
users_organizations::access_all.eq(true).or( // access_all in Organization
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
- )).or(
- groups::access_all.eq(true) // access_all in groups
- ).or( // access via groups
- groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
- collections_groups::collections_uuid.is_not_null()
- )
- )
- ).select(collections::all_columns)
+ ))).select(collections::all_columns)
.first::<CollectionDb>(conn).ok()
.from_db()
}}
}
- pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
+ pub async fn is_writable_by_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
let user_uuid = user_uuid.to_string();
db_run! { conn: {
collections::table
@@ -332,30 +296,12 @@ impl Collection {
users_organizations::user_uuid.eq(user_uuid)
)
))
- .left_join(groups_users::table.on(
- groups_users::users_organizations_uuid.eq(users_organizations::uuid)
- ))
- .left_join(groups::table.on(
- groups::uuid.eq(groups_users::groups_uuid)
- ))
- .left_join(collections_groups::table.on(
- collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
- collections_groups::collections_uuid.eq(collections::uuid)
- )
- ))
.filter(collections::uuid.eq(&self.uuid))
.filter(
users_collections::collection_uuid.eq(&self.uuid).and(users_collections::read_only.eq(false)).or(// Directly accessed collection
users_organizations::access_all.eq(true).or( // access_all in Organization
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
- )).or(
- groups::access_all.eq(true) // access_all in groups
- ).or( // access via groups
- groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
- collections_groups::collections_uuid.is_not_null().and(
- collections_groups::read_only.eq(false))
- )
- )
+ ))
)
.count()
.first::<i64>(conn)
@@ -364,7 +310,7 @@ impl Collection {
}}
}
- pub async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool {
+ async fn hide_passwords_for_user(&self, user_uuid: &str, conn: &DbConn) -> bool {
let user_uuid = user_uuid.to_string();
db_run! { conn: {
collections::table
@@ -378,30 +324,12 @@ impl Collection {
users_organizations::user_uuid.eq(user_uuid)
)
))
- .left_join(groups_users::table.on(
- groups_users::users_organizations_uuid.eq(users_organizations::uuid)
- ))
- .left_join(groups::table.on(
- groups::uuid.eq(groups_users::groups_uuid)
- ))
- .left_join(collections_groups::table.on(
- collections_groups::groups_uuid.eq(groups_users::groups_uuid).and(
- collections_groups::collections_uuid.eq(collections::uuid)
- )
- ))
.filter(collections::uuid.eq(&self.uuid))
.filter(
users_collections::collection_uuid.eq(&self.uuid).and(users_collections::hide_passwords.eq(true)).or(// Directly accessed collection
users_organizations::access_all.eq(true).or( // access_all in Organization
users_organizations::atype.le(UserOrgType::Admin as i32) // Org admin or owner
- )).or(
- groups::access_all.eq(true) // access_all in groups
- ).or( // access via groups
- groups_users::users_organizations_uuid.eq(users_organizations::uuid).and(
- collections_groups::collections_uuid.is_not_null().and(
- collections_groups::hide_passwords.eq(true))
- )
- )
+ ))
)
.count()
.first::<i64>(conn)
@@ -413,7 +341,11 @@ impl Collection {
/// Database methods
impl CollectionUser {
- pub async fn find_by_organization_and_user_uuid(org_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_organization_and_user_uuid(
+ org_uuid: &str,
+ user_uuid: &str,
+ conn: &DbConn,
+ ) -> Vec<Self> {
db_run! { conn: {
users_collections::table
.filter(users_collections::user_uuid.eq(user_uuid))
@@ -426,7 +358,7 @@ impl CollectionUser {
}}
}
- pub async fn find_by_organization(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_organization(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
users_collections::table
.inner_join(collections::table.on(collections::uuid.eq(users_collections::collection_uuid)))
@@ -444,12 +376,12 @@ impl CollectionUser {
collection_uuid: &str,
read_only: bool,
hide_passwords: bool,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> EmptyResult {
User::update_uuid_revision(user_uuid, conn).await;
db_run! { conn:
- sqlite, mysql {
+ {
match diesel::replace_into(users_collections::table)
.values((
users_collections::user_uuid.eq(user_uuid),
@@ -477,27 +409,10 @@ impl CollectionUser {
Err(e) => Err(e.into()),
}.map_res("Error adding user to collection")
}
- postgresql {
- diesel::insert_into(users_collections::table)
- .values((
- users_collections::user_uuid.eq(user_uuid),
- users_collections::collection_uuid.eq(collection_uuid),
- users_collections::read_only.eq(read_only),
- users_collections::hide_passwords.eq(hide_passwords),
- ))
- .on_conflict((users_collections::user_uuid, users_collections::collection_uuid))
- .do_update()
- .set((
- users_collections::read_only.eq(read_only),
- users_collections::hide_passwords.eq(hide_passwords),
- ))
- .execute(conn)
- .map_res("Error adding user to collection")
- }
}
}
- pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete(self, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(&self.user_uuid, conn).await;
db_run! { conn: {
@@ -511,7 +426,7 @@ impl CollectionUser {
}}
}
- pub async fn find_by_collection(collection_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_collection(collection_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
users_collections::table
.filter(users_collections::collection_uuid.eq(collection_uuid))
@@ -524,7 +439,7 @@ impl CollectionUser {
pub async fn find_by_collection_swap_user_uuid_with_org_user_uuid(
collection_uuid: &str,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Vec<Self> {
db_run! { conn: {
users_collections::table
@@ -540,7 +455,7 @@ impl CollectionUser {
pub async fn find_by_collection_and_user(
collection_uuid: &str,
user_uuid: &str,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Option<Self> {
db_run! { conn: {
users_collections::table
@@ -553,7 +468,7 @@ impl CollectionUser {
}}
}
- pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
users_collections::table
.filter(users_collections::user_uuid.eq(user_uuid))
@@ -564,8 +479,8 @@ impl CollectionUser {
}}
}
- pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
- for collection in CollectionUser::find_by_collection(collection_uuid, conn).await.iter() {
+ pub async fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
+ for collection in &Self::find_by_collection(collection_uuid, conn).await {
User::update_uuid_revision(&collection.user_uuid, conn).await;
}
@@ -576,8 +491,13 @@ impl CollectionUser {
}}
}
- pub async fn delete_all_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
- let collectionusers = Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await;
+ pub async fn delete_all_by_user_and_org(
+ user_uuid: &str,
+ org_uuid: &str,
+ conn: &DbConn,
+ ) -> EmptyResult {
+ let collectionusers =
+ Self::find_by_organization_and_user_uuid(org_uuid, user_uuid, conn).await;
db_run! { conn: {
for user in collectionusers {
@@ -595,11 +515,11 @@ impl CollectionUser {
/// Database methods
impl CollectionCipher {
- pub async fn save(cipher_uuid: &str, collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn save(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
Self::update_users_revision(collection_uuid, conn).await;
db_run! { conn:
- sqlite, mysql {
+ {
// Not checking for ForeignKey Constraints here.
// Table ciphers_collections does not have ForeignKey Constraints which would cause conflicts.
// This table has no constraints pointing to itself, but only to others.
@@ -611,21 +531,10 @@ impl CollectionCipher {
.execute(conn)
.map_res("Error adding cipher to collection")
}
- postgresql {
- diesel::insert_into(ciphers_collections::table)
- .values((
- ciphers_collections::cipher_uuid.eq(cipher_uuid),
- ciphers_collections::collection_uuid.eq(collection_uuid),
- ))
- .on_conflict((ciphers_collections::cipher_uuid, ciphers_collections::collection_uuid))
- .do_nothing()
- .execute(conn)
- .map_res("Error adding cipher to collection")
- }
}
}
- pub async fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete(cipher_uuid: &str, collection_uuid: &str, conn: &DbConn) -> EmptyResult {
Self::update_users_revision(collection_uuid, conn).await;
db_run! { conn: {
@@ -639,7 +548,7 @@ impl CollectionCipher {
}}
}
- pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(ciphers_collections::table.filter(ciphers_collections::cipher_uuid.eq(cipher_uuid)))
.execute(conn)
@@ -647,7 +556,7 @@ impl CollectionCipher {
}}
}
- pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ async fn delete_all_by_collection(collection_uuid: &str, conn: &DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(ciphers_collections::table.filter(ciphers_collections::collection_uuid.eq(collection_uuid)))
.execute(conn)
@@ -655,7 +564,7 @@ impl CollectionCipher {
}}
}
- pub async fn update_users_revision(collection_uuid: &str, conn: &mut DbConn) {
+ async fn update_users_revision(collection_uuid: &str, conn: &DbConn) {
if let Some(collection) = Collection::find_by_uuid(collection_uuid, conn).await {
collection.update_users_revision(conn).await;
}
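[Note] The flattened match in to_json_details above resolves a user's (read_only, hide_passwords) pair in three tiers. A simplified sketch of the same decision table, with illustrative types standing in for UserOrganization and the sync-data entries:

struct OrgMembership { full_access: bool }
struct CollectionAccess { read_only: bool, hide_passwords: bool }

/// Tiered resolution: full org access is unrestricted, a known member
/// falls back to the per-collection entry (unrestricted if absent),
/// and an unknown membership gets the most restrictive pair.
fn flags(org: Option<&OrgMembership>, entry: Option<&CollectionAccess>) -> (bool, bool) {
    match org {
        Some(m) if m.full_access => (false, false),
        Some(_) => entry.map_or((false, false), |uc| (uc.read_only, uc.hide_passwords)),
        None => (true, true),
    }
}

fn main() {
    assert_eq!(flags(None, None), (true, true));
    assert_eq!(flags(Some(&OrgMembership { full_access: true }), None), (false, false));
}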
diff --git a/src/db/models/device.rs b/src/db/models/device.rs
@@ -1,6 +1,5 @@
-use chrono::{NaiveDateTime, Utc};
-
use crate::crypto;
+use chrono::{NaiveDateTime, Utc};
use core::fmt;
db_object! {
@@ -10,19 +9,15 @@ db_object! {
#[diesel(primary_key(uuid, user_uuid))]
pub struct Device {
pub uuid: String,
- pub created_at: NaiveDateTime,
+ created_at: NaiveDateTime,
pub updated_at: NaiveDateTime,
-
pub user_uuid: String,
-
- pub name: String,
- pub atype: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs
- pub push_uuid: Option<String>,
- pub push_token: Option<String>,
-
+ name: String,
+ atype: i32, // https://github.com/bitwarden/server/blob/master/src/Core/Enums/DeviceType.cs
+ push_uuid: Option<String>,
+ push_token: Option<String>,
pub refresh_token: String,
-
- pub twofactor_remember: Option<String>,
+ twofactor_remember: Option<String>,
}
}
@@ -47,14 +42,6 @@ impl Device {
}
}
- pub fn refresh_twofactor_remember(&mut self) -> String {
- use data_encoding::BASE64;
- let twofactor_remember = crypto::encode_random_bytes::<180>(BASE64);
- self.twofactor_remember = Some(twofactor_remember.clone());
-
- twofactor_remember
- }
-
pub fn delete_twofactor_remember(&mut self) {
self.twofactor_remember = None;
}
@@ -134,27 +121,20 @@ use crate::error::MapResult;
/// Database methods
impl Device {
- pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
self.updated_at = Utc::now().naive_utc();
db_run! { conn:
- sqlite, mysql {
+ {
crate::util::retry(
|| diesel::replace_into(devices::table).values(DeviceDb::to_db(self)).execute(conn),
10,
).map_res("Error saving device")
}
- postgresql {
- let value = DeviceDb::to_db(self);
- crate::util::retry(
- || diesel::insert_into(devices::table).values(&value).on_conflict((devices::uuid, devices::user_uuid)).do_update().set(&value).execute(conn),
- 10,
- ).map_res("Error saving device")
- }
}
}
- pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(devices::table.filter(devices::user_uuid.eq(user_uuid)))
.execute(conn)
@@ -162,11 +142,7 @@ impl Device {
}}
}
- pub async fn find_by_uuid_and_user(
- uuid: &str,
- user_uuid: &str,
- conn: &mut DbConn,
- ) -> Option<Self> {
+ pub async fn find_by_uuid_and_user(uuid: &str, user_uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
devices::table
.filter(devices::uuid.eq(uuid))
@@ -177,36 +153,7 @@ impl Device {
}}
}
- pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- devices::table
- .filter(devices::user_uuid.eq(user_uuid))
- .load::<DeviceDb>(conn)
- .expect("Error loading devices")
- .from_db()
- }}
- }
-
- pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
- db_run! { conn: {
- devices::table
- .filter(devices::uuid.eq(uuid))
- .first::<DeviceDb>(conn)
- .ok()
- .from_db()
- }}
- }
-
- pub async fn clear_push_token_by_uuid(uuid: &str, conn: &mut DbConn) -> EmptyResult {
- db_run! { conn: {
- diesel::update(devices::table)
- .filter(devices::uuid.eq(uuid))
- .set(devices::push_token.eq::<Option<String>>(None))
- .execute(conn)
- .map_res("Error removing push token")
- }}
- }
- pub async fn find_by_refresh_token(refresh_token: &str, conn: &mut DbConn) -> Option<Self> {
+ pub async fn find_by_refresh_token(refresh_token: &str, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
devices::table
.filter(devices::refresh_token.eq(refresh_token))
@@ -216,7 +163,7 @@ impl Device {
}}
}
- pub async fn find_latest_active_by_user(user_uuid: &str, conn: &mut DbConn) -> Option<Self> {
+ pub async fn find_latest_active_by_user(user_uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
devices::table
.filter(devices::user_uuid.eq(user_uuid))
@@ -226,28 +173,6 @@ impl Device {
.from_db()
}}
}
- pub async fn find_push_devices_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- devices::table
- .filter(devices::user_uuid.eq(user_uuid))
- .filter(devices::push_token.is_not_null())
- .load::<DeviceDb>(conn)
- .expect("Error loading push devices")
- .from_db()
- }}
- }
-
- pub async fn check_user_has_push_device(user_uuid: &str, conn: &mut DbConn) -> bool {
- db_run! { conn: {
- devices::table
- .filter(devices::user_uuid.eq(user_uuid))
- .filter(devices::push_token.is_not_null())
- .count()
- .first::<i64>(conn)
- .ok()
- .unwrap_or(0) != 0
- }}
- }
}
pub enum DeviceType {
@@ -279,60 +204,59 @@ pub enum DeviceType {
impl fmt::Display for DeviceType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
- DeviceType::Android => write!(f, "Android"),
- DeviceType::Ios => write!(f, "iOS"),
- DeviceType::ChromeExtension => write!(f, "Chrome Extension"),
- DeviceType::FirefoxExtension => write!(f, "Firefox Extension"),
- DeviceType::OperaExtension => write!(f, "Opera Extension"),
- DeviceType::EdgeExtension => write!(f, "Edge Extension"),
- DeviceType::WindowsDesktop => write!(f, "Windows Desktop"),
- DeviceType::MacOsDesktop => write!(f, "MacOS Desktop"),
- DeviceType::LinuxDesktop => write!(f, "Linux Desktop"),
- DeviceType::ChromeBrowser => write!(f, "Chrome Browser"),
- DeviceType::FirefoxBrowser => write!(f, "Firefox Browser"),
- DeviceType::OperaBrowser => write!(f, "Opera Browser"),
- DeviceType::EdgeBrowser => write!(f, "Edge Browser"),
- DeviceType::IEBrowser => write!(f, "Internet Explorer"),
- DeviceType::UnknownBrowser => write!(f, "Unknown Browser"),
- DeviceType::AndroidAmazon => write!(f, "Android Amazon"),
- DeviceType::Uwp => write!(f, "UWP"),
- DeviceType::SafariBrowser => write!(f, "Safari Browser"),
- DeviceType::VivaldiBrowser => write!(f, "Vivaldi Browser"),
- DeviceType::VivaldiExtension => write!(f, "Vivaldi Extension"),
- DeviceType::SafariExtension => write!(f, "Safari Extension"),
- DeviceType::Sdk => write!(f, "SDK"),
- DeviceType::Server => write!(f, "Server"),
+ Self::Android => write!(f, "Android"),
+ Self::Ios => write!(f, "iOS"),
+ Self::ChromeExtension => write!(f, "Chrome Extension"),
+ Self::FirefoxExtension => write!(f, "Firefox Extension"),
+ Self::OperaExtension => write!(f, "Opera Extension"),
+ Self::EdgeExtension => write!(f, "Edge Extension"),
+ Self::WindowsDesktop => write!(f, "Windows Desktop"),
+ Self::MacOsDesktop => write!(f, "MacOS Desktop"),
+ Self::LinuxDesktop => write!(f, "Linux Desktop"),
+ Self::ChromeBrowser => write!(f, "Chrome Browser"),
+ Self::FirefoxBrowser => write!(f, "Firefox Browser"),
+ Self::OperaBrowser => write!(f, "Opera Browser"),
+ Self::EdgeBrowser => write!(f, "Edge Browser"),
+ Self::IEBrowser => write!(f, "Internet Explorer"),
+ Self::UnknownBrowser => write!(f, "Unknown Browser"),
+ Self::AndroidAmazon => write!(f, "Android Amazon"),
+ Self::Uwp => write!(f, "UWP"),
+ Self::SafariBrowser => write!(f, "Safari Browser"),
+ Self::VivaldiBrowser => write!(f, "Vivaldi Browser"),
+ Self::VivaldiExtension => write!(f, "Vivaldi Extension"),
+ Self::SafariExtension => write!(f, "Safari Extension"),
+ Self::Sdk => write!(f, "SDK"),
+ Self::Server => write!(f, "Server"),
}
}
}
impl DeviceType {
- pub fn from_i32(value: i32) -> DeviceType {
+ pub const fn from_i32(value: i32) -> Self {
match value {
- 0 => DeviceType::Android,
- 1 => DeviceType::Ios,
- 2 => DeviceType::ChromeExtension,
- 3 => DeviceType::FirefoxExtension,
- 4 => DeviceType::OperaExtension,
- 5 => DeviceType::EdgeExtension,
- 6 => DeviceType::WindowsDesktop,
- 7 => DeviceType::MacOsDesktop,
- 8 => DeviceType::LinuxDesktop,
- 9 => DeviceType::ChromeBrowser,
- 10 => DeviceType::FirefoxBrowser,
- 11 => DeviceType::OperaBrowser,
- 12 => DeviceType::EdgeBrowser,
- 13 => DeviceType::IEBrowser,
- 14 => DeviceType::UnknownBrowser,
- 15 => DeviceType::AndroidAmazon,
- 16 => DeviceType::Uwp,
- 17 => DeviceType::SafariBrowser,
- 18 => DeviceType::VivaldiBrowser,
- 19 => DeviceType::VivaldiExtension,
- 20 => DeviceType::SafariExtension,
- 21 => DeviceType::Sdk,
- 22 => DeviceType::Server,
- _ => DeviceType::UnknownBrowser,
+ 0 => Self::Android,
+ 1 => Self::Ios,
+ 2 => Self::ChromeExtension,
+ 3 => Self::FirefoxExtension,
+ 4 => Self::OperaExtension,
+ 5 => Self::EdgeExtension,
+ 6 => Self::WindowsDesktop,
+ 7 => Self::MacOsDesktop,
+ 8 => Self::LinuxDesktop,
+ 9 => Self::ChromeBrowser,
+ 10 => Self::FirefoxBrowser,
+ 11 => Self::OperaBrowser,
+ 12 => Self::EdgeBrowser,
+ 13 => Self::IEBrowser,
+ 15 => Self::AndroidAmazon,
+ 16 => Self::Uwp,
+ 17 => Self::SafariBrowser,
+ 18 => Self::VivaldiBrowser,
+ 19 => Self::VivaldiExtension,
+ 20 => Self::SafariExtension,
+ 21 => Self::Sdk,
+ 22 => Self::Server,
+ _ => Self::UnknownBrowser,
}
}
}
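[Note] from_i32 above becomes a const fn, which works because a match over integer literals allocates nothing and is evaluable at compile time; the explicit 14 arm could be dropped since the wildcard already yields UnknownBrowser. A tiny self-contained sketch of the same shape:

#[derive(Debug, PartialEq, Eq)]
enum Kind {
    Android,
    UnknownBrowser,
}

const fn kind_from_i32(value: i32) -> Kind {
    match value {
        0 => Kind::Android,
        _ => Kind::UnknownBrowser, // catch-all doubles as the dropped arm
    }
}

// Being `const` lets the mapping feed compile-time constants.
const DEFAULT_KIND: Kind = kind_from_i32(-1);

fn main() {
    assert_eq!(kind_from_i32(0), Kind::Android);
    assert_eq!(DEFAULT_KIND, Kind::UnknownBrowser);
}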
diff --git a/src/db/models/emergency_access.rs b/src/db/models/emergency_access.rs
@@ -1,308 +0,0 @@
-use chrono::{NaiveDateTime, Utc};
-use serde_json::Value;
-
-use crate::{api::EmptyResult, db::DbConn, error::MapResult};
-
-use super::User;
-
-db_object! {
- #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
- #[diesel(table_name = emergency_access)]
- #[diesel(treat_none_as_null = true)]
- #[diesel(primary_key(uuid))]
- pub struct EmergencyAccess {
- pub uuid: String,
- pub grantor_uuid: String,
- pub grantee_uuid: Option<String>,
- pub email: Option<String>,
- pub key_encrypted: Option<String>,
- pub atype: i32, //EmergencyAccessType
- pub status: i32, //EmergencyAccessStatus
- pub wait_time_days: i32,
- pub recovery_initiated_at: Option<NaiveDateTime>,
- pub last_notification_at: Option<NaiveDateTime>,
- pub updated_at: NaiveDateTime,
- pub created_at: NaiveDateTime,
- }
-}
-
-/// Local methods
-
-impl EmergencyAccess {
- pub fn new(
- grantor_uuid: String,
- email: String,
- status: i32,
- atype: i32,
- wait_time_days: i32,
- ) -> Self {
- let now = Utc::now().naive_utc();
-
- Self {
- uuid: crate::util::get_uuid(),
- grantor_uuid,
- grantee_uuid: None,
- email: Some(email),
- status,
- atype,
- wait_time_days,
- recovery_initiated_at: None,
- created_at: now,
- updated_at: now,
- key_encrypted: None,
- last_notification_at: None,
- }
- }
-
- pub fn get_type_as_str(&self) -> &'static str {
- if self.atype == EmergencyAccessType::View as i32 {
- "View"
- } else {
- "Takeover"
- }
- }
-
- pub fn to_json(&self) -> Value {
- json!({
- "Id": self.uuid,
- "Status": self.status,
- "Type": self.atype,
- "WaitTimeDays": self.wait_time_days,
- "Object": "emergencyAccess",
- })
- }
-
- pub async fn to_json_grantor_details(&self, conn: &mut DbConn) -> Value {
- let grantor_user = User::find_by_uuid(&self.grantor_uuid, conn)
- .await
- .expect("Grantor user not found.");
-
- json!({
- "Id": self.uuid,
- "Status": self.status,
- "Type": self.atype,
- "WaitTimeDays": self.wait_time_days,
- "GrantorId": grantor_user.uuid,
- "Email": grantor_user.email,
- "Name": grantor_user.name,
- "Object": "emergencyAccessGrantorDetails",
- })
- }
-
- pub async fn to_json_grantee_details(&self, conn: &mut DbConn) -> Value {
- let grantee_user = if let Some(grantee_uuid) = self.grantee_uuid.as_deref() {
- Some(
- User::find_by_uuid(grantee_uuid, conn)
- .await
- .expect("Grantee user not found."),
- )
- } else if let Some(email) = self.email.as_deref() {
- Some(
- User::find_by_mail(email, conn)
- .await
- .expect("Grantee user not found."),
- )
- } else {
- None
- };
-
- json!({
- "Id": self.uuid,
- "Status": self.status,
- "Type": self.atype,
- "WaitTimeDays": self.wait_time_days,
- "GranteeId": grantee_user.as_ref().map_or("", |u| &u.uuid),
- "Email": grantee_user.as_ref().map_or("", |u| &u.email),
- "Name": grantee_user.as_ref().map_or("", |u| &u.name),
- "Object": "emergencyAccessGranteeDetails",
- })
- }
-}
-
-#[derive(Copy, Clone)]
-pub enum EmergencyAccessType {
- View = 0,
- Takeover = 1,
-}
-
-pub enum EmergencyAccessStatus {
- Invited = 0,
- RecoveryInitiated = 3,
- RecoveryApproved = 4,
-}
-
-impl EmergencyAccess {
- pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
- User::update_uuid_revision(&self.grantor_uuid, conn).await;
- self.updated_at = Utc::now().naive_utc();
-
- db_run! { conn:
- sqlite, mysql {
- match diesel::replace_into(emergency_access::table)
- .values(EmergencyAccessDb::to_db(self))
- .execute(conn)
- {
- Ok(_) => Ok(()),
- // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
- Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
- diesel::update(emergency_access::table)
- .filter(emergency_access::uuid.eq(&self.uuid))
- .set(EmergencyAccessDb::to_db(self))
- .execute(conn)
- .map_res("Error updating emergency access")
- }
- Err(e) => Err(e.into()),
- }.map_res("Error saving emergency access")
- }
- postgresql {
- let value = EmergencyAccessDb::to_db(self);
- diesel::insert_into(emergency_access::table)
- .values(&value)
- .on_conflict(emergency_access::uuid)
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error saving emergency access")
- }
- }
- }
-
- pub async fn update_access_status_and_save(
- &mut self,
- status: i32,
- date: &NaiveDateTime,
- conn: &mut DbConn,
- ) -> EmptyResult {
- // Update the grantee so that it will refresh its status.
- User::update_uuid_revision(
- self.grantee_uuid.as_ref().expect("Error getting grantee"),
- conn,
- )
- .await;
- self.status = status;
- self.updated_at = date.to_owned();
-
- db_run! {conn: {
- crate::util::retry(|| {
- diesel::update(emergency_access::table.filter(emergency_access::uuid.eq(&self.uuid)))
- .set((emergency_access::status.eq(status), emergency_access::updated_at.eq(date)))
- .execute(conn)
- }, 10)
- .map_res("Error updating emergency access status")
- }}
- }
-
- pub async fn update_last_notification_date_and_save(
- &mut self,
- date: &NaiveDateTime,
- conn: &mut DbConn,
- ) -> EmptyResult {
- self.last_notification_at = Some(date.to_owned());
- self.updated_at = date.to_owned();
-
- db_run! {conn: {
- crate::util::retry(|| {
- diesel::update(emergency_access::table.filter(emergency_access::uuid.eq(&self.uuid)))
- .set((emergency_access::last_notification_at.eq(date), emergency_access::updated_at.eq(date)))
- .execute(conn)
- }, 10)
- .map_res("Error updating emergency access status")
- }}
- }
-
- pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
- for ea in Self::find_all_by_grantor_uuid(user_uuid, conn).await {
- ea.delete(conn).await?;
- }
- for ea in Self::find_all_by_grantee_uuid(user_uuid, conn).await {
- ea.delete(conn).await?;
- }
- Ok(())
- }
-
- pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
- User::update_uuid_revision(&self.grantor_uuid, conn).await;
-
- db_run! { conn: {
- diesel::delete(emergency_access::table.filter(emergency_access::uuid.eq(self.uuid)))
- .execute(conn)
- .map_res("Error removing user from emergency access")
- }}
- }
-
- pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
- db_run! { conn: {
- emergency_access::table
- .filter(emergency_access::uuid.eq(uuid))
- .first::<EmergencyAccessDb>(conn)
- .ok().from_db()
- }}
- }
-
- pub async fn find_by_grantor_uuid_and_grantee_uuid_or_email(
- grantor_uuid: &str,
- grantee_uuid: &str,
- email: &str,
- conn: &mut DbConn,
- ) -> Option<Self> {
- db_run! { conn: {
- emergency_access::table
- .filter(emergency_access::grantor_uuid.eq(grantor_uuid))
- .filter(emergency_access::grantee_uuid.eq(grantee_uuid).or(emergency_access::email.eq(email)))
- .first::<EmergencyAccessDb>(conn)
- .ok().from_db()
- }}
- }
-
- pub async fn find_all_recoveries_initiated(conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- emergency_access::table
- .filter(emergency_access::status.eq(EmergencyAccessStatus::RecoveryInitiated as i32))
- .filter(emergency_access::recovery_initiated_at.is_not_null())
- .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
- }}
- }
-
- pub async fn find_by_uuid_and_grantor_uuid(
- uuid: &str,
- grantor_uuid: &str,
- conn: &mut DbConn,
- ) -> Option<Self> {
- db_run! { conn: {
- emergency_access::table
- .filter(emergency_access::uuid.eq(uuid))
- .filter(emergency_access::grantor_uuid.eq(grantor_uuid))
- .first::<EmergencyAccessDb>(conn)
- .ok().from_db()
- }}
- }
-
- pub async fn find_all_by_grantee_uuid(grantee_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- emergency_access::table
- .filter(emergency_access::grantee_uuid.eq(grantee_uuid))
- .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
- }}
- }
-
- pub async fn find_invited_by_grantee_email(
- grantee_email: &str,
- conn: &mut DbConn,
- ) -> Option<Self> {
- db_run! { conn: {
- emergency_access::table
- .filter(emergency_access::email.eq(grantee_email))
- .filter(emergency_access::status.eq(EmergencyAccessStatus::Invited as i32))
- .first::<EmergencyAccessDb>(conn)
- .ok().from_db()
- }}
- }
-
- pub async fn find_all_by_grantor_uuid(grantor_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- emergency_access::table
- .filter(emergency_access::grantor_uuid.eq(grantor_uuid))
- .load::<EmergencyAccessDb>(conn).expect("Error loading emergency_access").from_db()
- }}
- }
-}
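[Note] The deleted module above carried the same two-backend upsert seen throughout these models; with the PostgreSQL branches gone, what survives everywhere is the SQLite shape: replace_into, plus an UPDATE fallback when REPLACE's implicit delete would trip a foreign-key check. A hedged sketch with an illustrative table (the real code wraps this in the db_run! macro and MapResult helpers, and assumes diesel 2 with the sqlite feature):

use diesel::prelude::*;
use diesel::result::{DatabaseErrorKind, Error};
use diesel::sqlite::SqliteConnection;

diesel::table! {
    devices (uuid) {
        uuid -> Text,
        name -> Text,
    }
}

#[derive(Insertable, AsChangeset)]
#[diesel(table_name = devices)]
struct DeviceRow<'a> {
    uuid: &'a str,
    name: &'a str,
}

fn upsert(conn: &mut SqliteConnection, row: &DeviceRow<'_>) -> QueryResult<usize> {
    match diesel::replace_into(devices::table).values(row).execute(conn) {
        // REPLACE deletes the old row first; if a child row still
        // references it, SQLite raises a foreign-key violation, so
        // fall back to updating the existing row in place.
        Err(Error::DatabaseError(DatabaseErrorKind::ForeignKeyViolation, _)) => {
            diesel::update(devices::table.filter(devices::uuid.eq(row.uuid)))
                .set(row)
                .execute(conn)
        }
        other => other,
    }
}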
diff --git a/src/db/models/event.rs b/src/db/models/event.rs
@@ -1,312 +0,0 @@
-use crate::db::DbConn;
-use crate::{api::EmptyResult, error::MapResult};
-use chrono::{NaiveDateTime, Utc};
-use serde_json::Value;
-
-db_object! {
- // Upstream: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
- // Upstream: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Api/Models/Public/Response/EventResponseModel.cs
- // Upstream SQL: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Sql/dbo/Tables/Event.sql
- #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
- #[diesel(table_name = event)]
- #[diesel(primary_key(uuid))]
- pub struct Event {
- pub uuid: String,
- pub event_type: i32, // EventType
- pub user_uuid: Option<String>,
- pub org_uuid: Option<String>,
- pub cipher_uuid: Option<String>,
- pub collection_uuid: Option<String>,
- pub group_uuid: Option<String>,
- pub org_user_uuid: Option<String>,
- pub act_user_uuid: Option<String>,
- // Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/DeviceType.cs
- pub device_type: Option<i32>,
- pub ip_address: Option<String>,
- pub event_date: NaiveDateTime,
- pub policy_uuid: Option<String>,
- pub provider_uuid: Option<String>,
- pub provider_user_uuid: Option<String>,
- pub provider_org_uuid: Option<String>,
- }
-}
-
-// Upstream enum: https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Enums/EventType.cs
-#[derive(Debug, Copy, Clone)]
-pub enum EventType {
- // User
- UserLoggedIn = 1000,
- UserChangedPassword = 1001,
- UserUpdated2fa = 1002,
- UserDisabled2fa = 1003,
- UserRecovered2fa = 1004,
- UserFailedLogIn = 1005,
- UserFailedLogIn2fa = 1006,
- UserClientExportedVault = 1007,
- // UserUpdatedTempPassword = 1008, // Not supported
- // UserMigratedKeyToKeyConnector = 1009, // Not supported
-
- // Cipher
- CipherCreated = 1100,
- CipherUpdated = 1101,
- CipherDeleted = 1102,
- CipherAttachmentCreated = 1103,
- CipherAttachmentDeleted = 1104,
- CipherShared = 1105,
- CipherUpdatedCollections = 1106,
- CipherClientViewed = 1107,
- CipherClientToggledPasswordVisible = 1108,
- CipherClientToggledHiddenFieldVisible = 1109,
- CipherClientToggledCardCodeVisible = 1110,
- CipherClientCopiedPassword = 1111,
- CipherClientCopiedHiddenField = 1112,
- CipherClientCopiedCardCode = 1113,
- CipherClientAutofilled = 1114,
- CipherSoftDeleted = 1115,
- CipherRestored = 1116,
- CipherClientToggledCardNumberVisible = 1117,
-
- // Collection
- CollectionCreated = 1300,
- CollectionUpdated = 1301,
- CollectionDeleted = 1302,
-
- // Group
- GroupCreated = 1400,
- GroupUpdated = 1401,
- GroupDeleted = 1402,
-
- // OrganizationUser
- OrganizationUserInvited = 1500,
- OrganizationUserConfirmed = 1501,
- OrganizationUserUpdated = 1502,
- OrganizationUserRemoved = 1503,
- OrganizationUserUpdatedGroups = 1504,
- // OrganizationUserUnlinkedSso = 1505, // Not supported
- OrganizationUserResetPasswordEnroll = 1506,
- OrganizationUserResetPasswordWithdraw = 1507,
- OrganizationUserAdminResetPassword = 1508,
- // OrganizationUserResetSsoLink = 1509, // Not supported
- // OrganizationUserFirstSsoLogin = 1510, // Not supported
- OrganizationUserRevoked = 1511,
- OrganizationUserRestored = 1512,
-
- // Organization
- OrganizationUpdated = 1600,
- OrganizationPurgedVault = 1601,
- OrganizationClientExportedVault = 1602,
- // OrganizationVaultAccessed = 1603,
- // OrganizationEnabledSso = 1604, // Not supported
- // OrganizationDisabledSso = 1605, // Not supported
- // OrganizationEnabledKeyConnector = 1606, // Not supported
- // OrganizationDisabledKeyConnector = 1607, // Not supported
- // OrganizationSponsorshipsSynced = 1608, // Not supported
-
- // Policy
- PolicyUpdated = 1700,
- // Provider (Not yet supported)
- // ProviderUserInvited = 1800, // Not supported
- // ProviderUserConfirmed = 1801, // Not supported
- // ProviderUserUpdated = 1802, // Not supported
- // ProviderUserRemoved = 1803, // Not supported
- // ProviderOrganizationCreated = 1900, // Not supported
- // ProviderOrganizationAdded = 1901, // Not supported
- // ProviderOrganizationRemoved = 1902, // Not supported
- // ProviderOrganizationVaultAccessed = 1903, // Not supported
-}
-
-/// Local methods
-impl Event {
- pub fn new(event_type: i32, event_date: Option<NaiveDateTime>) -> Self {
- let event_date = match event_date {
- Some(d) => d,
- None => Utc::now().naive_utc(),
- };
-
- Self {
- uuid: crate::util::get_uuid(),
- event_type,
- user_uuid: None,
- org_uuid: None,
- cipher_uuid: None,
- collection_uuid: None,
- group_uuid: None,
- org_user_uuid: None,
- act_user_uuid: None,
- device_type: None,
- ip_address: None,
- event_date,
- policy_uuid: None,
- provider_uuid: None,
- provider_user_uuid: None,
- provider_org_uuid: None,
- }
- }
-
- pub fn to_json(&self) -> Value {
- use crate::util::format_date;
-
- json!({
- "type": self.event_type,
- "userId": self.user_uuid,
- "organizationId": self.org_uuid,
- "cipherId": self.cipher_uuid,
- "collectionId": self.collection_uuid,
- "groupId": self.group_uuid,
- "organizationUserId": self.org_user_uuid,
- "actingUserId": self.act_user_uuid,
- "date": format_date(&self.event_date),
- "deviceType": self.device_type,
- "ipAddress": self.ip_address,
- "policyId": self.policy_uuid,
- "providerId": self.provider_uuid,
- "providerUserId": self.provider_user_uuid,
- "providerOrganizationId": self.provider_org_uuid,
- // "installationId": null, // Not supported
- })
- }
-}
-
-/// Database methods
-/// https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
-impl Event {
- pub const PAGE_SIZE: i64 = 30;
-
- /// #############
- /// Basic Queries
- pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
- db_run! { conn:
- sqlite, mysql {
- diesel::replace_into(event::table)
- .values(EventDb::to_db(self))
- .execute(conn)
- .map_res("Error saving event")
- }
- postgresql {
- diesel::insert_into(event::table)
- .values(EventDb::to_db(self))
- .on_conflict(event::uuid)
- .do_update()
- .set(EventDb::to_db(self))
- .execute(conn)
- .map_res("Error saving event")
- }
- }
- }
-
- pub async fn save_user_event(events: Vec<Event>, conn: &mut DbConn) -> EmptyResult {
- // Special save function that can handle multiple events at once.
- // SQLite (via Diesel) supports neither the DEFAULT argument nor inserting multiple values at once;
- // MySQL and PostgreSQL do.
- // We also ignore duplicates if they ever exist, since erroring on them could break the whole flow.
- db_run! { conn:
- // Unfortunately SQLite does not support inserting multiple records at the same time
- // We loop through the events here and insert them one at a time.
- sqlite {
- for event in events {
- diesel::insert_or_ignore_into(event::table)
- .values(EventDb::to_db(&event))
- .execute(conn)
- .unwrap_or_default();
- }
- Ok(())
- }
- mysql {
- let events: Vec<EventDb> = events.iter().map(EventDb::to_db).collect();
- diesel::insert_or_ignore_into(event::table)
- .values(&events)
- .execute(conn)
- .unwrap_or_default();
- Ok(())
- }
- postgresql {
- let events: Vec<EventDb> = events.iter().map(EventDb::to_db).collect();
- diesel::insert_into(event::table)
- .values(&events)
- .on_conflict_do_nothing()
- .execute(conn)
- .unwrap_or_default();
- Ok(())
- }
- }
- }
-
- pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
- db_run! { conn: {
- diesel::delete(event::table.filter(event::uuid.eq(self.uuid)))
- .execute(conn)
- .map_res("Error deleting event")
- }}
- }
-
- /// ##############
- /// Custom Queries
- pub async fn find_by_organization_uuid(
- org_uuid: &str,
- start: &NaiveDateTime,
- end: &NaiveDateTime,
- conn: &mut DbConn,
- ) -> Vec<Self> {
- db_run! { conn: {
- event::table
- .filter(event::org_uuid.eq(org_uuid))
- .filter(event::event_date.between(start, end))
- .order_by(event::event_date.desc())
- .limit(Self::PAGE_SIZE)
- .load::<EventDb>(conn)
- .expect("Error filtering events")
- .from_db()
- }}
- }
-
- pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
- db_run! { conn: {
- event::table
- .filter(event::org_uuid.eq(org_uuid))
- .count()
- .first::<i64>(conn)
- .ok()
- .unwrap_or(0)
- }}
- }
-
- pub async fn find_by_org_and_user_org(
- org_uuid: &str,
- user_org_uuid: &str,
- start: &NaiveDateTime,
- end: &NaiveDateTime,
- conn: &mut DbConn,
- ) -> Vec<Self> {
- db_run! { conn: {
- event::table
- .inner_join(users_organizations::table.on(users_organizations::uuid.eq(user_org_uuid)))
- .filter(event::org_uuid.eq(org_uuid))
- .filter(event::event_date.between(start, end))
- .filter(event::user_uuid.eq(users_organizations::user_uuid.nullable()).or(event::act_user_uuid.eq(users_organizations::user_uuid.nullable())))
- .select(event::all_columns)
- .order_by(event::event_date.desc())
- .limit(Self::PAGE_SIZE)
- .load::<EventDb>(conn)
- .expect("Error filtering events")
- .from_db()
- }}
- }
-
- pub async fn find_by_cipher_uuid(
- cipher_uuid: &str,
- start: &NaiveDateTime,
- end: &NaiveDateTime,
- conn: &mut DbConn,
- ) -> Vec<Self> {
- db_run! { conn: {
- event::table
- .filter(event::cipher_uuid.eq(cipher_uuid))
- .filter(event::event_date.between(start, end))
- .order_by(event::event_date.desc())
- .limit(Self::PAGE_SIZE)
- .load::<EventDb>(conn)
- .expect("Error filtering events")
- .from_db()
- }}
- }
-}
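
The two backend branches in the removed Event::save illustrate the upsert split that recurs throughout these models: SQLite and MySQL get REPLACE INTO, while PostgreSQL gets INSERT ... ON CONFLICT. A minimal, self-contained sketch of the same split, using a hypothetical `events` table rather than the fork's real schema:

use diesel::prelude::*;

diesel::table! {
    events (uuid) {
        uuid -> Text,
        event_type -> Integer,
    }
}

#[derive(Insertable, AsChangeset)]
#[diesel(table_name = events)]
struct NewEvent<'a> {
    uuid: &'a str,
    event_type: i32,
}

// SQLite/MySQL: REPLACE INTO deletes any conflicting row, then inserts.
fn upsert_sqlite(conn: &mut SqliteConnection, ev: &NewEvent) -> QueryResult<usize> {
    diesel::replace_into(events::table).values(ev).execute(conn)
}

// PostgreSQL has no REPLACE INTO; ON CONFLICT ... DO UPDATE is its upsert.
fn upsert_pg(conn: &mut PgConnection, ev: &NewEvent) -> QueryResult<usize> {
    diesel::insert_into(events::table)
        .values(ev)
        .on_conflict(events::uuid)
        .do_update()
        .set(ev)
        .execute(conn)
}

Dropping the postgresql branches below is what lets this fork collapse db_run! to a single arm.
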
diff --git a/src/db/models/favorite.rs b/src/db/models/favorite.rs
@@ -1,23 +1,20 @@
use super::User;
-
+use crate::api::EmptyResult;
+use crate::db::DbConn;
+use crate::error::MapResult;
db_object! {
#[derive(Identifiable, Queryable, Insertable)]
#[diesel(table_name = favorites)]
#[diesel(primary_key(user_uuid, cipher_uuid))]
pub struct Favorite {
- pub user_uuid: String,
- pub cipher_uuid: String,
+ user_uuid: String,
+ cipher_uuid: String,
}
}
-use crate::db::DbConn;
-
-use crate::api::EmptyResult;
-use crate::error::MapResult;
-
impl Favorite {
// Returns whether the specified cipher is a favorite of the specified user.
- pub async fn is_favorite(cipher_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> bool {
+ pub async fn is_favorite(cipher_uuid: &str, user_uuid: &str, conn: &DbConn) -> bool {
db_run! { conn: {
let query = favorites::table
.filter(favorites::cipher_uuid.eq(cipher_uuid))
@@ -29,8 +26,16 @@ impl Favorite {
}
// Sets whether the specified cipher is a favorite of the specified user.
- pub async fn set_favorite(favorite: bool, cipher_uuid: &str, user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
- let (old, new) = (Self::is_favorite(cipher_uuid, user_uuid, conn).await, favorite);
+ pub async fn set_favorite(
+ favorite: bool,
+ cipher_uuid: &str,
+ user_uuid: &str,
+ conn: &DbConn,
+ ) -> EmptyResult {
+ let (old, new) = (
+ Self::is_favorite(cipher_uuid, user_uuid, conn).await,
+ favorite,
+ );
match (old, new) {
(false, true) => {
User::update_uuid_revision(user_uuid, conn).await;
@@ -62,7 +67,7 @@ impl Favorite {
}
// Delete all favorite entries associated with the specified cipher.
- pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(favorites::table.filter(favorites::cipher_uuid.eq(cipher_uuid)))
.execute(conn)
@@ -71,7 +76,7 @@ impl Favorite {
}
// Delete all favorite entries associated with the specified user.
- pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(favorites::table.filter(favorites::user_uuid.eq(user_uuid)))
.execute(conn)
@@ -81,7 +86,7 @@ impl Favorite {
/// Return a vec of cipher_uuids; it only contains favorite-flagged ciphers.
/// This is used during a full sync so we only need one query for all favorite cipher matches.
- pub async fn get_all_cipher_uuid_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<String> {
+ pub async fn get_all_cipher_uuid_by_user(user_uuid: &str, conn: &DbConn) -> Vec<String> {
db_run! { conn: {
favorites::table
.filter(favorites::user_uuid.eq(user_uuid))
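
The reworked set_favorite keeps the (old, new) match above: only the two transitions that actually change state touch the database, and both bump the user's revision so clients re-sync. The transition table in isolation (a sketch, not the fork's code):

fn favorite_transition(old: bool, new: bool) -> &'static str {
    match (old, new) {
        (false, true) => "insert favorites row, bump user revision",
        (true, false) => "delete favorites row, bump user revision",
        // (false, false) and (true, true): nothing to do.
        _ => "no-op",
    }
}

fn main() {
    assert_eq!(favorite_transition(true, true), "no-op");
    assert_eq!(favorite_transition(false, true), "insert favorites row, bump user revision");
}
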
diff --git a/src/db/models/folder.rs b/src/db/models/folder.rs
@@ -1,15 +1,14 @@
+use super::User;
use chrono::{NaiveDateTime, Utc};
use serde_json::Value;
-use super::User;
-
db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = folders)]
#[diesel(primary_key(uuid))]
pub struct Folder {
pub uuid: String,
- pub created_at: NaiveDateTime,
+ created_at: NaiveDateTime,
pub updated_at: NaiveDateTime,
pub user_uuid: String,
pub name: String,
@@ -19,8 +18,8 @@ db_object! {
#[diesel(table_name = folders_ciphers)]
#[diesel(primary_key(cipher_uuid, folder_uuid))]
pub struct FolderCipher {
- pub cipher_uuid: String,
- pub folder_uuid: String,
+ cipher_uuid: String,
+ folder_uuid: String,
}
}
@@ -28,7 +27,6 @@ db_object! {
impl Folder {
pub fn new(user_uuid: String, name: String) -> Self {
let now = Utc::now().naive_utc();
-
Self {
uuid: crate::util::get_uuid(),
created_at: now,
@@ -60,19 +58,18 @@ impl FolderCipher {
}
}
-use crate::db::DbConn;
-
use crate::api::EmptyResult;
+use crate::db::DbConn;
use crate::error::MapResult;
/// Database methods
impl Folder {
- pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(&self.user_uuid, conn).await;
self.updated_at = Utc::now().naive_utc();
db_run! { conn:
- sqlite, mysql {
+ {
match diesel::replace_into(folders::table)
.values(FolderDb::to_db(self))
.execute(conn)
@@ -89,20 +86,10 @@ impl Folder {
Err(e) => Err(e.into()),
}.map_res("Error saving folder")
}
- postgresql {
- let value = FolderDb::to_db(self);
- diesel::insert_into(folders::table)
- .values(&value)
- .on_conflict(folders::uuid)
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error saving folder")
- }
}
}
- pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete(&self, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(&self.user_uuid, conn).await;
FolderCipher::delete_all_by_folder(&self.uuid, conn).await?;
@@ -113,14 +100,14 @@ impl Folder {
}}
}
- pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
for folder in Self::find_by_user(user_uuid, conn).await {
folder.delete(conn).await?;
}
Ok(())
}
- pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+ pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
folders::table
.filter(folders::uuid.eq(uuid))
@@ -130,7 +117,7 @@ impl Folder {
}}
}
- pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
folders::table
.filter(folders::user_uuid.eq(user_uuid))
@@ -142,9 +129,9 @@ impl Folder {
}
impl FolderCipher {
- pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn save(&self, conn: &DbConn) -> EmptyResult {
db_run! { conn:
- sqlite, mysql {
+ {
// No foreign-key constraint check is needed here:
// the folders_ciphers table has no foreign-key constraints that could conflict,
// since none of its constraints point to itself, only to other tables.
@@ -153,18 +140,10 @@ impl FolderCipher {
.execute(conn)
.map_res("Error adding cipher to folder")
}
- postgresql {
- diesel::insert_into(folders_ciphers::table)
- .values(FolderCipherDb::to_db(self))
- .on_conflict((folders_ciphers::cipher_uuid, folders_ciphers::folder_uuid))
- .do_nothing()
- .execute(conn)
- .map_res("Error adding cipher to folder")
- }
}
}
- pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete(self, conn: &DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(
folders_ciphers::table
@@ -176,7 +155,7 @@ impl FolderCipher {
}}
}
- pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete_all_by_cipher(cipher_uuid: &str, conn: &DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(folders_ciphers::table.filter(folders_ciphers::cipher_uuid.eq(cipher_uuid)))
.execute(conn)
@@ -184,7 +163,7 @@ impl FolderCipher {
}}
}
- pub async fn delete_all_by_folder(folder_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ async fn delete_all_by_folder(folder_uuid: &str, conn: &DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(folders_ciphers::table.filter(folders_ciphers::folder_uuid.eq(folder_uuid)))
.execute(conn)
@@ -192,7 +171,11 @@ impl FolderCipher {
}}
}
- pub async fn find_by_folder_and_cipher(folder_uuid: &str, cipher_uuid: &str, conn: &mut DbConn) -> Option<Self> {
+ pub async fn find_by_folder_and_cipher(
+ folder_uuid: &str,
+ cipher_uuid: &str,
+ conn: &DbConn,
+ ) -> Option<Self> {
db_run! { conn: {
folders_ciphers::table
.filter(folders_ciphers::folder_uuid.eq(folder_uuid))
@@ -202,20 +185,9 @@ impl FolderCipher {
.from_db()
}}
}
-
- pub async fn find_by_folder(folder_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- folders_ciphers::table
- .filter(folders_ciphers::folder_uuid.eq(folder_uuid))
- .load::<FolderCipherDb>(conn)
- .expect("Error loading folders")
- .from_db()
- }}
- }
-
/// Return a vec with (cipher_uuid, folder_uuid)
/// This is used during a full sync so we only need one query for all folder matches.
- pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<(String, String)> {
+ pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<(String, String)> {
db_run! { conn: {
folders_ciphers::table
.inner_join(folders::table)
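
Folder::save and FolderCipher::save now compile a single SQLite branch but keep the REPLACE INTO fallback: when the implicit delete-then-insert trips a foreign-key violation, the row must already exist, so a plain UPDATE finishes the upsert. The pattern in generic form (a sketch; closures stand in for the Diesel statements):

use diesel::result::{DatabaseErrorKind, Error, QueryResult};

// Try REPLACE INTO first; on a foreign-key violation (child rows reference
// the record, so the implicit DELETE failed) fall back to UPDATE.
fn upsert_with_fk_fallback(
    replace: impl FnOnce() -> QueryResult<usize>,
    update: impl FnOnce() -> QueryResult<usize>,
) -> QueryResult<usize> {
    match replace() {
        Err(Error::DatabaseError(DatabaseErrorKind::ForeignKeyViolation, _)) => update(),
        other => other,
    }
}
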
diff --git a/src/db/models/group.rs b/src/db/models/group.rs
@@ -1,542 +0,0 @@
-use chrono::{NaiveDateTime, Utc};
-use serde_json::Value;
-
-db_object! {
- #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
- #[diesel(table_name = groups)]
- #[diesel(primary_key(uuid))]
- pub struct Group {
- pub uuid: String,
- pub organizations_uuid: String,
- pub name: String,
- pub access_all: bool,
- pub external_id: Option<String>,
- pub creation_date: NaiveDateTime,
- pub revision_date: NaiveDateTime,
- }
-
- #[derive(Identifiable, Queryable, Insertable)]
- #[diesel(table_name = collections_groups)]
- #[diesel(primary_key(collections_uuid, groups_uuid))]
- pub struct CollectionGroup {
- pub collections_uuid: String,
- pub groups_uuid: String,
- pub read_only: bool,
- pub hide_passwords: bool,
- }
-
- #[derive(Identifiable, Queryable, Insertable)]
- #[diesel(table_name = groups_users)]
- #[diesel(primary_key(groups_uuid, users_organizations_uuid))]
- pub struct GroupUser {
- pub groups_uuid: String,
- pub users_organizations_uuid: String
- }
-}
-
-/// Local methods
-impl Group {
- pub fn new(organizations_uuid: String, name: String, access_all: bool, external_id: Option<String>) -> Self {
- let now = Utc::now().naive_utc();
-
- let mut new_model = Self {
- uuid: crate::util::get_uuid(),
- organizations_uuid,
- name,
- access_all,
- external_id: None,
- creation_date: now,
- revision_date: now,
- };
-
- new_model.set_external_id(external_id);
-
- new_model
- }
-
- pub fn to_json(&self) -> Value {
- use crate::util::format_date;
-
- json!({
- "Id": self.uuid,
- "OrganizationId": self.organizations_uuid,
- "Name": self.name,
- "AccessAll": self.access_all,
- "ExternalId": self.external_id,
- "CreationDate": format_date(&self.creation_date),
- "RevisionDate": format_date(&self.revision_date),
- "Object": "group"
- })
- }
-
- pub async fn to_json_details(&self, conn: &mut DbConn) -> Value {
- let collections_groups: Vec<Value> = CollectionGroup::find_by_group(&self.uuid, conn)
- .await
- .iter()
- .map(|entry| {
- json!({
- "Id": entry.collections_uuid,
- "ReadOnly": entry.read_only,
- "HidePasswords": entry.hide_passwords
- })
- })
- .collect();
-
- json!({
- "Id": self.uuid,
- "OrganizationId": self.organizations_uuid,
- "Name": self.name,
- "AccessAll": self.access_all,
- "ExternalId": self.external_id,
- "Collections": collections_groups,
- "Object": "groupDetails"
- })
- }
-
- pub fn set_external_id(&mut self, external_id: Option<String>) {
- // Check whether external_id is blank; we do not want to store empty strings in the database.
- self.external_id = match external_id {
- Some(external_id) if !external_id.trim().is_empty() => Some(external_id),
- _ => None,
- };
- }
-}
-
-impl CollectionGroup {
- pub fn new(collections_uuid: String, groups_uuid: String, read_only: bool, hide_passwords: bool) -> Self {
- Self {
- collections_uuid,
- groups_uuid,
- read_only,
- hide_passwords,
- }
- }
-}
-
-impl GroupUser {
- pub fn new(groups_uuid: String, users_organizations_uuid: String) -> Self {
- Self {
- groups_uuid,
- users_organizations_uuid,
- }
- }
-}
-
-use crate::db::DbConn;
-
-use crate::api::EmptyResult;
-use crate::error::MapResult;
-
-use super::{User, UserOrganization};
-
-/// Database methods
-impl Group {
- pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
- self.revision_date = Utc::now().naive_utc();
-
- db_run! { conn:
- sqlite, mysql {
- match diesel::replace_into(groups::table)
- .values(GroupDb::to_db(self))
- .execute(conn)
- {
- Ok(_) => Ok(()),
- // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
- Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
- diesel::update(groups::table)
- .filter(groups::uuid.eq(&self.uuid))
- .set(GroupDb::to_db(self))
- .execute(conn)
- .map_res("Error saving group")
- }
- Err(e) => Err(e.into()),
- }.map_res("Error saving group")
- }
- postgresql {
- let value = GroupDb::to_db(self);
- diesel::insert_into(groups::table)
- .values(&value)
- .on_conflict(groups::uuid)
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error saving group")
- }
- }
- }
-
- pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
- for group in Self::find_by_organization(org_uuid, conn).await {
- group.delete(conn).await?;
- }
- Ok(())
- }
-
- pub async fn find_by_organization(organizations_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- groups::table
- .filter(groups::organizations_uuid.eq(organizations_uuid))
- .load::<GroupDb>(conn)
- .expect("Error loading groups")
- .from_db()
- }}
- }
-
- pub async fn count_by_org(organizations_uuid: &str, conn: &mut DbConn) -> i64 {
- db_run! { conn: {
- groups::table
- .filter(groups::organizations_uuid.eq(organizations_uuid))
- .count()
- .first::<i64>(conn)
- .ok()
- .unwrap_or(0)
- }}
- }
-
- pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
- db_run! { conn: {
- groups::table
- .filter(groups::uuid.eq(uuid))
- .first::<GroupDb>(conn)
- .ok()
- .from_db()
- }}
- }
-
- pub async fn find_by_external_id(id: &str, conn: &mut DbConn) -> Option<Self> {
- db_run! { conn: {
- groups::table
- .filter(groups::external_id.eq(id))
- .first::<GroupDb>(conn)
- .ok()
- .from_db()
- }}
- }
- // Returns all organizations the user has full access to.
- pub async fn gather_user_organizations_full_access(user_uuid: &str, conn: &mut DbConn) -> Vec<String> {
- db_run! { conn: {
- groups_users::table
- .inner_join(users_organizations::table.on(
- users_organizations::uuid.eq(groups_users::users_organizations_uuid)
- ))
- .inner_join(groups::table.on(
- groups::uuid.eq(groups_users::groups_uuid)
- ))
- .filter(users_organizations::user_uuid.eq(user_uuid))
- .filter(groups::access_all.eq(true))
- .select(groups::organizations_uuid)
- .distinct()
- .load::<String>(conn)
- .expect("Error loading organization group full access information for user")
- }}
- }
-
- pub async fn is_in_full_access_group(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> bool {
- db_run! { conn: {
- groups::table
- .inner_join(groups_users::table.on(
- groups_users::groups_uuid.eq(groups::uuid)
- ))
- .inner_join(users_organizations::table.on(
- users_organizations::uuid.eq(groups_users::users_organizations_uuid)
- ))
- .filter(users_organizations::user_uuid.eq(user_uuid))
- .filter(groups::organizations_uuid.eq(org_uuid))
- .filter(groups::access_all.eq(true))
- .select(groups::access_all)
- .first::<bool>(conn)
- .unwrap_or_default()
- }}
- }
-
- pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
- CollectionGroup::delete_all_by_group(&self.uuid, conn).await?;
- GroupUser::delete_all_by_group(&self.uuid, conn).await?;
-
- db_run! { conn: {
- diesel::delete(groups::table.filter(groups::uuid.eq(&self.uuid)))
- .execute(conn)
- .map_res("Error deleting group")
- }}
- }
-
- pub async fn update_revision(uuid: &str, conn: &mut DbConn) {
- if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await {
- warn!("Failed to update revision for {}: {:#?}", uuid, e);
- }
- }
-
- async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult {
- db_run! {conn: {
- crate::util::retry(|| {
- diesel::update(groups::table.filter(groups::uuid.eq(uuid)))
- .set(groups::revision_date.eq(date))
- .execute(conn)
- }, 10)
- .map_res("Error updating group revision")
- }}
- }
-}
-
-impl CollectionGroup {
- pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
- let group_users = GroupUser::find_by_group(&self.groups_uuid, conn).await;
- for group_user in group_users {
- group_user.update_user_revision(conn).await;
- }
-
- db_run! { conn:
- sqlite, mysql {
- match diesel::replace_into(collections_groups::table)
- .values((
- collections_groups::collections_uuid.eq(&self.collections_uuid),
- collections_groups::groups_uuid.eq(&self.groups_uuid),
- collections_groups::read_only.eq(&self.read_only),
- collections_groups::hide_passwords.eq(&self.hide_passwords),
- ))
- .execute(conn)
- {
- Ok(_) => Ok(()),
- // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
- Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
- diesel::update(collections_groups::table)
- .filter(collections_groups::collections_uuid.eq(&self.collections_uuid))
- .filter(collections_groups::groups_uuid.eq(&self.groups_uuid))
- .set((
- collections_groups::collections_uuid.eq(&self.collections_uuid),
- collections_groups::groups_uuid.eq(&self.groups_uuid),
- collections_groups::read_only.eq(&self.read_only),
- collections_groups::hide_passwords.eq(&self.hide_passwords),
- ))
- .execute(conn)
- .map_res("Error adding group to collection")
- }
- Err(e) => Err(e.into()),
- }.map_res("Error adding group to collection")
- }
- postgresql {
- diesel::insert_into(collections_groups::table)
- .values((
- collections_groups::collections_uuid.eq(&self.collections_uuid),
- collections_groups::groups_uuid.eq(&self.groups_uuid),
- collections_groups::read_only.eq(self.read_only),
- collections_groups::hide_passwords.eq(self.hide_passwords),
- ))
- .on_conflict((collections_groups::collections_uuid, collections_groups::groups_uuid))
- .do_update()
- .set((
- collections_groups::read_only.eq(self.read_only),
- collections_groups::hide_passwords.eq(self.hide_passwords),
- ))
- .execute(conn)
- .map_res("Error adding group to collection")
- }
- }
- }
-
- pub async fn find_by_group(group_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- collections_groups::table
- .filter(collections_groups::groups_uuid.eq(group_uuid))
- .load::<CollectionGroupDb>(conn)
- .expect("Error loading collection groups")
- .from_db()
- }}
- }
-
- pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- collections_groups::table
- .inner_join(groups_users::table.on(
- groups_users::groups_uuid.eq(collections_groups::groups_uuid)
- ))
- .inner_join(users_organizations::table.on(
- users_organizations::uuid.eq(groups_users::users_organizations_uuid)
- ))
- .filter(users_organizations::user_uuid.eq(user_uuid))
- .select(collections_groups::all_columns)
- .load::<CollectionGroupDb>(conn)
- .expect("Error loading user collection groups")
- .from_db()
- }}
- }
-
- pub async fn find_by_collection(collection_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- collections_groups::table
- .filter(collections_groups::collections_uuid.eq(collection_uuid))
- .select(collections_groups::all_columns)
- .load::<CollectionGroupDb>(conn)
- .expect("Error loading collection groups")
- .from_db()
- }}
- }
-
- pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
- let group_users = GroupUser::find_by_group(&self.groups_uuid, conn).await;
- for group_user in group_users {
- group_user.update_user_revision(conn).await;
- }
-
- db_run! { conn: {
- diesel::delete(collections_groups::table)
- .filter(collections_groups::collections_uuid.eq(&self.collections_uuid))
- .filter(collections_groups::groups_uuid.eq(&self.groups_uuid))
- .execute(conn)
- .map_res("Error deleting collection group")
- }}
- }
-
- pub async fn delete_all_by_group(group_uuid: &str, conn: &mut DbConn) -> EmptyResult {
- let group_users = GroupUser::find_by_group(group_uuid, conn).await;
- for group_user in group_users {
- group_user.update_user_revision(conn).await;
- }
-
- db_run! { conn: {
- diesel::delete(collections_groups::table)
- .filter(collections_groups::groups_uuid.eq(group_uuid))
- .execute(conn)
- .map_res("Error deleting collection group")
- }}
- }
-
- pub async fn delete_all_by_collection(collection_uuid: &str, conn: &mut DbConn) -> EmptyResult {
- let collection_assigned_to_groups = CollectionGroup::find_by_collection(collection_uuid, conn).await;
- for collection_assigned_to_group in collection_assigned_to_groups {
- let group_users = GroupUser::find_by_group(&collection_assigned_to_group.groups_uuid, conn).await;
- for group_user in group_users {
- group_user.update_user_revision(conn).await;
- }
- }
-
- db_run! { conn: {
- diesel::delete(collections_groups::table)
- .filter(collections_groups::collections_uuid.eq(collection_uuid))
- .execute(conn)
- .map_res("Error deleting collection group")
- }}
- }
-}
-
-impl GroupUser {
- pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
- self.update_user_revision(conn).await;
-
- db_run! { conn:
- sqlite, mysql {
- match diesel::replace_into(groups_users::table)
- .values((
- groups_users::users_organizations_uuid.eq(&self.users_organizations_uuid),
- groups_users::groups_uuid.eq(&self.groups_uuid),
- ))
- .execute(conn)
- {
- Ok(_) => Ok(()),
- // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
- Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
- diesel::update(groups_users::table)
- .filter(groups_users::users_organizations_uuid.eq(&self.users_organizations_uuid))
- .filter(groups_users::groups_uuid.eq(&self.groups_uuid))
- .set((
- groups_users::users_organizations_uuid.eq(&self.users_organizations_uuid),
- groups_users::groups_uuid.eq(&self.groups_uuid),
- ))
- .execute(conn)
- .map_res("Error adding user to group")
- }
- Err(e) => Err(e.into()),
- }.map_res("Error adding user to group")
- }
- postgresql {
- diesel::insert_into(groups_users::table)
- .values((
- groups_users::users_organizations_uuid.eq(&self.users_organizations_uuid),
- groups_users::groups_uuid.eq(&self.groups_uuid),
- ))
- .on_conflict((groups_users::users_organizations_uuid, groups_users::groups_uuid))
- .do_update()
- .set((
- groups_users::users_organizations_uuid.eq(&self.users_organizations_uuid),
- groups_users::groups_uuid.eq(&self.groups_uuid),
- ))
- .execute(conn)
- .map_res("Error adding user to group")
- }
- }
- }
-
- pub async fn find_by_group(group_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- groups_users::table
- .filter(groups_users::groups_uuid.eq(group_uuid))
- .load::<GroupUserDb>(conn)
- .expect("Error loading group users")
- .from_db()
- }}
- }
-
- pub async fn find_by_user(users_organizations_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- groups_users::table
- .filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid))
- .load::<GroupUserDb>(conn)
- .expect("Error loading groups for user")
- .from_db()
- }}
- }
-
- pub async fn update_user_revision(&self, conn: &mut DbConn) {
- match UserOrganization::find_by_uuid(&self.users_organizations_uuid, conn).await {
- Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await,
- None => warn!("User could not be found!"),
- }
- }
-
- pub async fn delete_by_group_id_and_user_id(
- group_uuid: &str,
- users_organizations_uuid: &str,
- conn: &mut DbConn,
- ) -> EmptyResult {
- match UserOrganization::find_by_uuid(users_organizations_uuid, conn).await {
- Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await,
- None => warn!("User could not be found!"),
- };
-
- db_run! { conn: {
- diesel::delete(groups_users::table)
- .filter(groups_users::groups_uuid.eq(group_uuid))
- .filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid))
- .execute(conn)
- .map_res("Error deleting group users")
- }}
- }
-
- pub async fn delete_all_by_group(group_uuid: &str, conn: &mut DbConn) -> EmptyResult {
- let group_users = GroupUser::find_by_group(group_uuid, conn).await;
- for group_user in group_users {
- group_user.update_user_revision(conn).await;
- }
-
- db_run! { conn: {
- diesel::delete(groups_users::table)
- .filter(groups_users::groups_uuid.eq(group_uuid))
- .execute(conn)
- .map_res("Error deleting group users")
- }}
- }
-
- pub async fn delete_all_by_user(users_organizations_uuid: &str, conn: &mut DbConn) -> EmptyResult {
- match UserOrganization::find_by_uuid(users_organizations_uuid, conn).await {
- Some(user) => User::update_uuid_revision(&user.user_uuid, conn).await,
- None => warn!("User could not be found!"),
- }
-
- db_run! { conn: {
- diesel::delete(groups_users::table)
- .filter(groups_users::users_organizations_uuid.eq(users_organizations_uuid))
- .execute(conn)
- .map_res("Error deleting user groups")
- }}
- }
-}
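
One small idiom from the removed module is worth noting: set_external_id stored None instead of an empty or whitespace-only string, so the database never held blank identifiers. The same normalization in isolation (a sketch; the identifier is example data):

fn normalize_external_id(external_id: Option<String>) -> Option<String> {
    // Drop blank identifiers rather than persisting empty strings.
    external_id.filter(|id| !id.trim().is_empty())
}

fn main() {
    assert_eq!(normalize_external_id(Some("   ".into())), None);
    assert_eq!(
        normalize_external_id(Some("scim-group-1".into())),
        Some("scim-group-1".into())
    );
}
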
diff --git a/src/db/models/mod.rs b/src/db/models/mod.rs
@@ -1,33 +1,22 @@
-mod attachment;
mod auth_request;
mod cipher;
mod collection;
mod device;
-mod emergency_access;
-mod event;
mod favorite;
mod folder;
-mod group;
mod org_policy;
mod organization;
-mod send;
mod two_factor;
-mod two_factor_incomplete;
mod user;
-
-pub use self::attachment::Attachment;
pub use self::auth_request::AuthRequest;
pub use self::cipher::Cipher;
pub use self::collection::{Collection, CollectionCipher, CollectionUser};
pub use self::device::{Device, DeviceType};
-pub use self::emergency_access::{EmergencyAccess, EmergencyAccessStatus, EmergencyAccessType};
-pub use self::event::{Event, EventType};
pub use self::favorite::Favorite;
pub use self::folder::{Folder, FolderCipher};
-pub use self::group::{CollectionGroup, Group, GroupUser};
pub use self::org_policy::{OrgPolicy, OrgPolicyErr, OrgPolicyType};
-pub use self::organization::{Organization, OrganizationApiKey, UserOrgStatus, UserOrgType, UserOrganization};
-pub use self::send::{Send, SendType};
+pub use self::organization::{
+ Organization, OrganizationApiKey, UserOrgStatus, UserOrgType, UserOrganization,
+};
pub use self::two_factor::{TwoFactor, TwoFactorType};
-pub use self::two_factor_incomplete::TwoFactorIncomplete;
-pub use self::user::{Invitation, User, UserKdfType, UserStampException};
+pub use self::user::{User, UserKdfType, UserStampException};
diff --git a/src/db/models/org_policy.rs b/src/db/models/org_policy.rs
@@ -1,21 +1,19 @@
-use serde::Deserialize;
-use serde_json::Value;
-
+use super::{TwoFactor, UserOrgStatus, UserOrgType, UserOrganization};
use crate::api::EmptyResult;
use crate::db::DbConn;
use crate::error::MapResult;
use crate::util::UpCase;
-
-use super::{TwoFactor, UserOrgStatus, UserOrgType, UserOrganization};
+use serde::Deserialize;
+use serde_json::Value;
db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = org_policies)]
#[diesel(primary_key(uuid))]
pub struct OrgPolicy {
- pub uuid: String,
- pub org_uuid: String,
- pub atype: i32,
+ uuid: String,
+ org_uuid: String,
+ atype: i32,
pub enabled: bool,
pub data: String,
}
@@ -28,31 +26,19 @@ pub enum OrgPolicyType {
MasterPassword = 1,
PasswordGenerator = 2,
SingleOrg = 3,
- // RequireSso = 4, // Not supported
PersonalOwnership = 5,
DisableSend = 6,
SendOptions = 7,
ResetPassword = 8,
- // MaximumVaultTimeout = 9, // Not supported (Not AGPLv3 Licensed)
- // DisablePersonalVaultExport = 10, // Not supported (Not AGPLv3 Licensed)
-}
-
-// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/SendOptionsPolicyData.cs
-#[derive(Deserialize)]
-#[allow(non_snake_case)]
-pub struct SendOptionsPolicyData {
- pub DisableHideEmail: bool,
}
// https://github.com/bitwarden/server/blob/5cbdee137921a19b1f722920f0fa3cd45af2ef0f/src/Core/Models/Data/Organizations/Policies/ResetPasswordDataModel.cs
#[derive(Deserialize)]
#[allow(non_snake_case)]
-pub struct ResetPasswordDataModel {
- pub AutoEnrollEnabled: bool,
+struct ResetPasswordDataModel {
+ AutoEnrollEnabled: bool,
}
-
-pub type OrgPolicyResult = Result<(), OrgPolicyErr>;
-
+type OrgPolicyResult = Result<(), OrgPolicyErr>;
#[derive(Debug)]
pub enum OrgPolicyErr {
TwoFactorMissing,
@@ -71,10 +57,6 @@ impl OrgPolicy {
}
}
- pub fn has_type(&self, policy_type: OrgPolicyType) -> bool {
- self.atype == policy_type as i32
- }
-
pub fn to_json(&self) -> Value {
let data_json: Value = serde_json::from_str(&self.data).unwrap_or(Value::Null);
json!({
@@ -90,9 +72,9 @@ impl OrgPolicy {
/// Database methods
impl OrgPolicy {
- pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn save(&self, conn: &DbConn) -> EmptyResult {
db_run! { conn:
- sqlite, mysql {
+ {
match diesel::replace_into(org_policies::table)
.values(OrgPolicyDb::to_db(self))
.execute(conn)
@@ -109,49 +91,9 @@ impl OrgPolicy {
Err(e) => Err(e.into()),
}.map_res("Error saving org_policy")
}
- postgresql {
- let value = OrgPolicyDb::to_db(self);
- // We need to make sure we're not going to violate the unique constraint on org_uuid and atype.
- // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
- // not support multiple constraints on ON CONFLICT clauses.
- diesel::delete(
- org_policies::table
- .filter(org_policies::org_uuid.eq(&self.org_uuid))
- .filter(org_policies::atype.eq(&self.atype)),
- )
- .execute(conn)
- .map_res("Error deleting org_policy for insert")?;
-
- diesel::insert_into(org_policies::table)
- .values(&value)
- .on_conflict(org_policies::uuid)
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error saving org_policy")
- }
}
}
-
- pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
- db_run! { conn: {
- diesel::delete(org_policies::table.filter(org_policies::uuid.eq(self.uuid)))
- .execute(conn)
- .map_res("Error deleting org_policy")
- }}
- }
-
- pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
- db_run! { conn: {
- org_policies::table
- .filter(org_policies::uuid.eq(uuid))
- .first::<OrgPolicyDb>(conn)
- .ok()
- .from_db()
- }}
- }
-
- pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
org_policies::table
.filter(org_policies::org_uuid.eq(org_uuid))
@@ -161,7 +103,7 @@ impl OrgPolicy {
}}
}
- pub async fn find_confirmed_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
org_policies::table
.inner_join(
@@ -182,7 +124,7 @@ impl OrgPolicy {
pub async fn find_by_org_and_type(
org_uuid: &str,
policy_type: OrgPolicyType,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Option<Self> {
db_run! { conn: {
org_policies::table
@@ -194,7 +136,7 @@ impl OrgPolicy {
}}
}
- pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(org_policies::table.filter(org_policies::org_uuid.eq(org_uuid)))
.execute(conn)
@@ -202,10 +144,10 @@ impl OrgPolicy {
}}
}
- pub async fn find_accepted_and_confirmed_by_user_and_active_policy(
+ async fn find_accepted_and_confirmed_by_user_and_active_policy(
user_uuid: &str,
policy_type: OrgPolicyType,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Vec<Self> {
db_run! { conn: {
org_policies::table
@@ -229,30 +171,6 @@ impl OrgPolicy {
}}
}
- pub async fn find_confirmed_by_user_and_active_policy(
- user_uuid: &str,
- policy_type: OrgPolicyType,
- conn: &mut DbConn,
- ) -> Vec<Self> {
- db_run! { conn: {
- org_policies::table
- .inner_join(
- users_organizations::table.on(
- users_organizations::org_uuid.eq(org_policies::org_uuid)
- .and(users_organizations::user_uuid.eq(user_uuid)))
- )
- .filter(
- users_organizations::status.eq(UserOrgStatus::Confirmed as i32)
- )
- .filter(org_policies::atype.eq(policy_type as i32))
- .filter(org_policies::enabled.eq(true))
- .select(org_policies::all_columns)
- .load::<OrgPolicyDb>(conn)
- .expect("Error loading org_policy")
- .from_db()
- }}
- }
-
/// Returns true if the user belongs to an org that has enabled the specified policy type,
/// and the user is not an owner or admin of that org. This is only useful for checking
/// applicability of policy types that have these particular semantics.
@@ -260,9 +178,9 @@ impl OrgPolicy {
user_uuid: &str,
policy_type: OrgPolicyType,
exclude_org_uuid: Option<&str>,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> bool {
- for policy in OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(
+ for policy in Self::find_accepted_and_confirmed_by_user_and_active_policy(
user_uuid,
policy_type,
conn,
@@ -289,7 +207,7 @@ impl OrgPolicy {
user_uuid: &str,
org_uuid: &str,
exclude_current_org: bool,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> OrgPolicyResult {
// Enforce TwoFactor/TwoStep login
if TwoFactor::find_by_user(user_uuid, conn).await.is_empty() {
@@ -318,8 +236,8 @@ impl OrgPolicy {
Ok(())
}
- pub async fn org_is_reset_password_auto_enroll(org_uuid: &str, conn: &mut DbConn) -> bool {
- match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await {
+ pub async fn org_is_reset_password_auto_enroll(org_uuid: &str, conn: &DbConn) -> bool {
+ match Self::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await {
Some(policy) => {
if let Ok(opts) =
serde_json::from_str::<UpCase<ResetPasswordDataModel>>(&policy.data)
@@ -332,31 +250,4 @@ impl OrgPolicy {
false
}
-
- /// Returns true if the user belongs to an org that has enabled the `DisableHideEmail`
- /// option of the `Send Options` policy, and the user is not an owner or admin of that org.
- pub async fn is_hide_email_disabled(user_uuid: &str, conn: &mut DbConn) -> bool {
- for policy in OrgPolicy::find_confirmed_by_user_and_active_policy(
- user_uuid,
- OrgPolicyType::SendOptions,
- conn,
- )
- .await
- {
- if let Some(user) =
- UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await
- {
- if user.atype < UserOrgType::Admin {
- if let Ok(opts) =
- serde_json::from_str::<UpCase<SendOptionsPolicyData>>(&policy.data)
- {
- if opts.data.DisableHideEmail {
- return true;
- }
- }
- }
- }
- }
- false
- }
}
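
org_is_reset_password_auto_enroll only trusts the policy's JSON `data` column after it parses. A standalone equivalent using plain serde attributes (the fork routes this through its own UpCase wrapper, not shown here):

use serde::Deserialize;

#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
struct ResetPasswordData {
    auto_enroll_enabled: bool,
}

fn main() {
    // Example payload as stored in org_policies.data for the ResetPassword policy.
    let data = r#"{"AutoEnrollEnabled": true}"#;
    let opts: ResetPasswordData = serde_json::from_str(data).expect("malformed policy data");
    assert!(opts.auto_enroll_enabled);
}

In the fork itself a parse failure simply yields false (auto-enroll disabled) rather than an error.
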
diff --git a/src/db/models/organization.rs b/src/db/models/organization.rs
@@ -1,10 +1,9 @@
+use super::{CollectionUser, OrgPolicy, OrgPolicyType, TwoFactor, User};
use chrono::{NaiveDateTime, Utc};
use num_traits::FromPrimitive;
use serde_json::Value;
use std::cmp::Ordering;
-use super::{CollectionUser, Group, GroupUser, OrgPolicy, OrgPolicyType, TwoFactor, User};
-
db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = organizations)]
@@ -24,7 +23,6 @@ db_object! {
pub uuid: String,
pub user_uuid: String,
pub org_uuid: String,
-
pub access_all: bool,
pub akey: String,
pub status: i32,
@@ -39,13 +37,11 @@ db_object! {
pub struct OrganizationApiKey {
pub uuid: String,
pub org_uuid: String,
- pub atype: i32,
+ atype: i32,
pub api_key: String,
pub revision_date: NaiveDateTime,
}
}
-
-// https://github.com/bitwarden/server/blob/b86a04cef9f1e1b82cf18e49fc94e017c641130c/src/Core/Enums/OrganizationUserStatusType.cs
pub enum UserOrgStatus {
Revoked = -1,
Invited = 0,
@@ -64,17 +60,17 @@ pub enum UserOrgType {
impl UserOrgType {
pub fn from_str(s: &str) -> Option<Self> {
match s {
- "0" | "Owner" => Some(UserOrgType::Owner),
- "1" | "Admin" => Some(UserOrgType::Admin),
- "2" | "User" => Some(UserOrgType::User),
- "3" | "Manager" => Some(UserOrgType::Manager),
+ "0" | "Owner" => Some(Self::Owner),
+ "1" | "Admin" => Some(Self::Admin),
+ "2" | "User" => Some(Self::User),
+ "3" | "Manager" => Some(Self::Manager),
_ => None,
}
}
}
impl Ord for UserOrgType {
- fn cmp(&self, other: &UserOrgType) -> Ordering {
+ fn cmp(&self, other: &Self) -> Ordering {
// For easy comparison, map each variant to an access level (where 0 is lowest).
static ACCESS_LEVEL: [i32; 4] = [
3, // Owner
@@ -87,7 +83,7 @@ impl Ord for UserOrgType {
}
impl PartialOrd for UserOrgType {
- fn partial_cmp(&self, other: &UserOrgType) -> Option<Ordering> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
@@ -113,14 +109,14 @@ impl PartialOrd<i32> for UserOrgType {
fn ge(&self, other: &i32) -> bool {
matches!(
self.partial_cmp(other),
- Some(Ordering::Greater) | Some(Ordering::Equal)
+ Some(Ordering::Greater | Ordering::Equal)
)
}
}
impl PartialEq<UserOrgType> for i32 {
fn eq(&self, other: &UserOrgType) -> bool {
- *self == *other as i32
+ *self == *other as Self
}
}
@@ -139,27 +135,13 @@ impl PartialOrd<UserOrgType> for i32 {
fn le(&self, other: &UserOrgType) -> bool {
matches!(
self.partial_cmp(other),
- Some(Ordering::Less) | Some(Ordering::Equal) | None
+ Some(Ordering::Less | Ordering::Equal) | None
)
}
}
/// Local methods
impl Organization {
- pub fn new(
- name: String,
- billing_email: String,
- private_key: Option<String>,
- public_key: Option<String>,
- ) -> Self {
- Self {
- uuid: crate::util::get_uuid(),
- name,
- billing_email,
- private_key,
- public_key,
- }
- }
// https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/Organizations/OrganizationResponseModel.cs
pub fn to_json(&self) -> Value {
json!({
@@ -199,7 +181,6 @@ impl Organization {
})
}
}
-
// Used to either subtract from or add to the current status.
// The number 128 is fine: it is well within the range of an i32,
// and the same goes for the database, where we only use INTEGER (the same as an i32).
@@ -270,20 +251,19 @@ impl OrganizationApiKey {
}
}
-use crate::db::DbConn;
-
use crate::api::EmptyResult;
+use crate::db::DbConn;
use crate::error::MapResult;
/// Database methods
impl Organization {
- pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
- for user_org in UserOrganization::find_by_org(&self.uuid, conn).await.iter() {
+ pub async fn save(&self, conn: &DbConn) -> EmptyResult {
+ for user_org in &UserOrganization::find_by_org(&self.uuid, conn).await {
User::update_uuid_revision(&user_org.user_uuid, conn).await;
}
db_run! { conn:
- sqlite, mysql {
+ {
match diesel::replace_into(organizations::table)
.values(OrganizationDb::to_db(self))
.execute(conn)
@@ -301,27 +281,16 @@ impl Organization {
}.map_res("Error saving organization")
}
- postgresql {
- let value = OrganizationDb::to_db(self);
- diesel::insert_into(organizations::table)
- .values(&value)
- .on_conflict(organizations::uuid)
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error saving organization")
- }
}
}
- pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete(self, conn: &DbConn) -> EmptyResult {
use super::{Cipher, Collection};
Cipher::delete_all_by_organization(&self.uuid, conn).await?;
Collection::delete_all_by_organization(&self.uuid, conn).await?;
UserOrganization::delete_all_by_organization(&self.uuid, conn).await?;
OrgPolicy::delete_all_by_organization(&self.uuid, conn).await?;
- Group::delete_all_by_organization(&self.uuid, conn).await?;
db_run! { conn: {
diesel::delete(organizations::table.filter(organizations::uuid.eq(self.uuid)))
@@ -330,7 +299,7 @@ impl Organization {
}}
}
- pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+ pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
organizations::table
.filter(organizations::uuid.eq(uuid))
@@ -338,16 +307,10 @@ impl Organization {
.ok().from_db()
}}
}
-
- pub async fn get_all(conn: &mut DbConn) -> Vec<Self> {
- db_run! { conn: {
- organizations::table.load::<OrganizationDb>(conn).expect("Error loading organizations").from_db()
- }}
- }
}
impl UserOrganization {
- pub async fn to_json(&self, conn: &mut DbConn) -> Value {
+ pub async fn to_json(&self, conn: &DbConn) -> Value {
let org = Organization::find_by_uuid(&self.org_uuid, conn)
.await
.unwrap();
@@ -376,49 +339,20 @@ impl UserOrganization {
"UseSso": false, // Not supported
"ProviderId": null,
"ProviderName": null,
- // "KeyConnectorEnabled": false,
- // "KeyConnectorUrl": null,
-
- // TODO: Add support for Custom User Roles
- // See: https://bitwarden.com/help/article/user-types-access-control/#custom-role
- // "Permissions": {
- // "AccessEventLogs": false,
- // "AccessImportExport": false,
- // "AccessReports": false,
- // "ManageAllCollections": false,
- // "CreateNewCollections": false,
- // "EditAnyCollection": false,
- // "DeleteAnyCollection": false,
- // "ManageAssignedCollections": false,
- // "editAssignedCollections": false,
- // "deleteAssignedCollections": false,
- // "ManageCiphers": false,
- // "ManageGroups": false,
- // "ManagePolicies": false,
- // "ManageResetPassword": false,
- // "ManageSso": false, // Not supported
- // "ManageUsers": false,
- // "ManageScim": false, // Not supported (Not AGPLv3 Licensed)
- // },
-
"MaxStorageGb": 10, // The value doesn't matter, we don't check server-side
-
- // These are per user
"UserId": self.user_uuid,
"Key": self.akey,
"Status": self.status,
"Type": self.atype,
"Enabled": true,
-
"Object": "profileOrganization",
})
}
-
pub async fn to_json_user_details(
&self,
include_collections: bool,
_: bool,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Value {
let user = User::find_by_uuid(&self.user_uuid, conn).await.unwrap();
@@ -479,53 +413,11 @@ impl UserOrganization {
})
}
- pub async fn to_json_details(&self, conn: &mut DbConn) -> Value {
- let coll_uuids = if self.access_all {
- vec![] // If we have complete access, no need to fill the array
- } else {
- let collections = CollectionUser::find_by_organization_and_user_uuid(
- &self.org_uuid,
- &self.user_uuid,
- conn,
- )
- .await;
- collections
- .iter()
- .map(|c| {
- json!({
- "Id": c.collection_uuid,
- "ReadOnly": c.read_only,
- "HidePasswords": c.hide_passwords,
- })
- })
- .collect()
- };
-
- // Because Bitwarden wants the status to be -1 for revoked users, we need to catch that here.
- // We subtract/add a number so we can restore/activate the user to its previous state again.
- let status = if self.status < UserOrgStatus::Revoked as i32 {
- UserOrgStatus::Revoked as i32
- } else {
- self.status
- };
-
- json!({
- "Id": self.uuid,
- "UserId": self.user_uuid,
-
- "Status": status,
- "Type": self.atype,
- "AccessAll": self.access_all,
- "Collections": coll_uuids,
-
- "Object": "organizationUserDetails",
- })
- }
- pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn save(&self, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(&self.user_uuid, conn).await;
db_run! { conn:
- sqlite, mysql {
+ {
match diesel::replace_into(users_organizations::table)
.values(UserOrganizationDb::to_db(self))
.execute(conn)
@@ -542,24 +434,13 @@ impl UserOrganization {
Err(e) => Err(e.into()),
}.map_res("Error adding user to organization")
}
- postgresql {
- let value = UserOrganizationDb::to_db(self);
- diesel::insert_into(users_organizations::table)
- .values(&value)
- .on_conflict(users_organizations::uuid)
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error adding user to organization")
- }
}
}
- pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete(self, conn: &DbConn) -> EmptyResult {
User::update_uuid_revision(&self.user_uuid, conn).await;
CollectionUser::delete_all_by_user_and_org(&self.user_uuid, &self.org_uuid, conn).await?;
- GroupUser::delete_all_by_user(&self.uuid, conn).await?;
db_run! { conn: {
diesel::delete(users_organizations::table.filter(users_organizations::uuid.eq(self.uuid)))
@@ -568,29 +449,23 @@ impl UserOrganization {
}}
}
- pub async fn delete_all_by_organization(org_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ async fn delete_all_by_organization(org_uuid: &str, conn: &DbConn) -> EmptyResult {
for user_org in Self::find_by_org(org_uuid, conn).await {
user_org.delete(conn).await?;
}
Ok(())
}
- pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
for user_org in Self::find_any_state_by_user(user_uuid, conn).await {
user_org.delete(conn).await?;
}
Ok(())
}
- pub async fn find_by_email_and_org(
- email: &str,
- org_id: &str,
- conn: &mut DbConn,
- ) -> Option<UserOrganization> {
+ pub async fn find_by_email_and_org(email: &str, org_id: &str, conn: &DbConn) -> Option<Self> {
if let Some(user) = super::User::find_by_mail(email, conn).await {
- if let Some(user_org) =
- UserOrganization::find_by_user_and_org(&user.uuid, org_id, conn).await
- {
+ if let Some(user_org) = Self::find_by_user_and_org(&user.uuid, org_id, conn).await {
return Some(user_org);
}
}
@@ -598,20 +473,16 @@ impl UserOrganization {
None
}
- pub fn has_status(&self, status: UserOrgStatus) -> bool {
+ const fn has_status(&self, status: UserOrgStatus) -> bool {
self.status == status as i32
}
- pub fn has_type(&self, user_type: UserOrgType) -> bool {
- self.atype == user_type as i32
- }
-
pub fn has_full_access(&self) -> bool {
(self.access_all || self.atype >= UserOrgType::Admin)
&& self.has_status(UserOrgStatus::Confirmed)
}
- pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+ pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::uuid.eq(uuid))
@@ -620,11 +491,7 @@ impl UserOrganization {
}}
}
- pub async fn find_by_uuid_and_org(
- uuid: &str,
- org_uuid: &str,
- conn: &mut DbConn,
- ) -> Option<Self> {
+ pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::uuid.eq(uuid))
@@ -634,7 +501,7 @@ impl UserOrganization {
}}
}
- pub async fn find_confirmed_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid))
@@ -644,7 +511,7 @@ impl UserOrganization {
}}
}
- pub async fn find_invited_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_invited_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid))
@@ -654,7 +521,7 @@ impl UserOrganization {
}}
}
- pub async fn find_any_state_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ async fn find_any_state_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid))
@@ -663,7 +530,7 @@ impl UserOrganization {
}}
}
- pub async fn count_accepted_and_confirmed_by_user(user_uuid: &str, conn: &mut DbConn) -> i64 {
+ pub async fn count_accepted_and_confirmed_by_user(user_uuid: &str, conn: &DbConn) -> i64 {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid))
@@ -675,7 +542,7 @@ impl UserOrganization {
}}
}
- pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_org(org_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::org_uuid.eq(org_uuid))
@@ -684,21 +551,10 @@ impl UserOrganization {
}}
}
- pub async fn count_by_org(org_uuid: &str, conn: &mut DbConn) -> i64 {
- db_run! { conn: {
- users_organizations::table
- .filter(users_organizations::org_uuid.eq(org_uuid))
- .count()
- .first::<i64>(conn)
- .ok()
- .unwrap_or(0)
- }}
- }
-
pub async fn find_by_org_and_type(
org_uuid: &str,
atype: UserOrgType,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
@@ -712,7 +568,7 @@ impl UserOrganization {
pub async fn count_confirmed_by_org_and_type(
org_uuid: &str,
atype: UserOrgType,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> i64 {
db_run! { conn: {
users_organizations::table
@@ -728,7 +584,7 @@ impl UserOrganization {
pub async fn find_by_user_and_org(
user_uuid: &str,
org_uuid: &str,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Option<Self> {
db_run! { conn: {
users_organizations::table
@@ -739,7 +595,7 @@ impl UserOrganization {
}}
}
- pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
.filter(users_organizations::user_uuid.eq(user_uuid))
@@ -748,20 +604,10 @@ impl UserOrganization {
}}
}
- pub async fn get_org_uuid_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<String> {
- db_run! { conn: {
- users_organizations::table
- .filter(users_organizations::user_uuid.eq(user_uuid))
- .select(users_organizations::org_uuid)
- .load::<String>(conn)
- .unwrap_or_default()
- }}
- }
-
pub async fn find_by_user_and_policy(
user_uuid: &str,
policy_type: OrgPolicyType,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
@@ -784,7 +630,7 @@ impl UserOrganization {
pub async fn find_by_cipher_and_org(
cipher_uuid: &str,
org_uuid: &str,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
@@ -808,26 +654,10 @@ impl UserOrganization {
}}
}
- pub async fn user_has_ge_admin_access_to_cipher(
- user_uuid: &str,
- cipher_uuid: &str,
- conn: &mut DbConn,
- ) -> bool {
- db_run! { conn: {
- users_organizations::table
- .inner_join(ciphers::table.on(ciphers::uuid.eq(cipher_uuid).and(ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable()))))
- .filter(users_organizations::user_uuid.eq(user_uuid))
- .filter(users_organizations::atype.eq_any(vec![UserOrgType::Owner as i32, UserOrgType::Admin as i32]))
- .count()
- .first::<i64>(conn)
- .ok().unwrap_or(0) != 0
- }}
- }
-
pub async fn find_by_collection_and_org(
collection_uuid: &str,
org_uuid: &str,
- conn: &mut DbConn,
+ conn: &DbConn,
) -> Vec<Self> {
db_run! { conn: {
users_organizations::table
@@ -844,27 +674,12 @@ impl UserOrganization {
.load::<UserOrganizationDb>(conn).expect("Error loading user organizations").from_db()
}}
}
-
- pub async fn find_by_external_id_and_org(
- ext_id: &str,
- org_uuid: &str,
- conn: &mut DbConn,
- ) -> Option<Self> {
- db_run! {conn: {
- users_organizations::table
- .filter(
- users_organizations::external_id.eq(ext_id)
- .and(users_organizations::org_uuid.eq(org_uuid))
- )
- .first::<UserOrganizationDb>(conn).ok().from_db()
- }}
- }
}
impl OrganizationApiKey {
pub async fn save(&self, conn: &DbConn) -> EmptyResult {
db_run! { conn:
- sqlite, mysql {
+ {
match diesel::replace_into(organization_api_key::table)
.values(OrganizationApiKeyDb::to_db(self))
.execute(conn)
@@ -882,16 +697,6 @@ impl OrganizationApiKey {
}.map_res("Error saving organization")
}
- postgresql {
- let value = OrganizationApiKeyDb::to_db(self);
- diesel::insert_into(organization_api_key::table)
- .values(&value)
- .on_conflict((organization_api_key::uuid, organization_api_key::org_uuid))
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error saving organization")
- }
}
}
@@ -908,7 +713,6 @@ impl OrganizationApiKey {
#[cfg(test)]
mod tests {
use super::*;
-
#[test]
#[allow(non_snake_case)]
fn partial_cmp_UserOrgType() {
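
The ordering those tests exercise comes from the ACCESS_LEVEL table: each discriminant maps to a rank, so Owner > Admin > Manager > User even though the raw discriminants run 0..=3 in a different order. A compact sketch of the same ranking (not the fork's impls; only Owner's rank of 3 is visible in this hunk, the rest are inferred from the variant semantics):

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum UserOrgType { Owner = 0, Admin = 1, User = 2, Manager = 3 }

// Index by discriminant, mirroring the static ACCESS_LEVEL table:
// Owner -> 3, Admin -> 2, User -> 0, Manager -> 1.
fn access_level(t: UserOrgType) -> i32 {
    [3, 2, 0, 1][t as usize]
}

fn main() {
    assert!(access_level(UserOrgType::Owner) > access_level(UserOrgType::Admin));
    assert!(access_level(UserOrgType::Manager) > access_level(UserOrgType::User));
}
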
diff --git a/src/db/models/send.rs b/src/db/models/send.rs
@@ -1,315 +0,0 @@
-use chrono::{NaiveDateTime, Utc};
-use serde_json::Value;
-
-use super::User;
-
-db_object! {
- #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
- #[diesel(table_name = sends)]
- #[diesel(treat_none_as_null = true)]
- #[diesel(primary_key(uuid))]
- pub struct Send {
- pub uuid: String,
-
- pub user_uuid: Option<String>,
- pub organization_uuid: Option<String>,
-
-
- pub name: String,
- pub notes: Option<String>,
-
- pub atype: i32,
- pub data: String,
- pub akey: String,
- pub password_hash: Option<Vec<u8>>,
- password_salt: Option<Vec<u8>>,
- password_iter: Option<i32>,
-
- pub max_access_count: Option<i32>,
- pub access_count: i32,
-
- pub creation_date: NaiveDateTime,
- pub revision_date: NaiveDateTime,
- pub expiration_date: Option<NaiveDateTime>,
- pub deletion_date: NaiveDateTime,
-
- pub disabled: bool,
- pub hide_email: Option<bool>,
- }
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, num_derive::FromPrimitive)]
-pub enum SendType {
- Text = 0,
- File = 1,
-}
-
-impl Send {
- pub fn new(
- atype: i32,
- name: String,
- data: String,
- akey: String,
- deletion_date: NaiveDateTime,
- ) -> Self {
- let now = Utc::now().naive_utc();
-
- Self {
- uuid: crate::util::get_uuid(),
- user_uuid: None,
- organization_uuid: None,
-
- name,
- notes: None,
-
- atype,
- data,
- akey,
- password_hash: None,
- password_salt: None,
- password_iter: None,
-
- max_access_count: None,
- access_count: 0,
-
- creation_date: now,
- revision_date: now,
- expiration_date: None,
- deletion_date,
-
- disabled: false,
- hide_email: None,
- }
- }
-
- pub fn set_password(&mut self, password: Option<&str>) {
- const PASSWORD_ITER: i32 = 100_000;
-
- if let Some(password) = password {
- self.password_iter = Some(PASSWORD_ITER);
- let salt = crate::crypto::get_random_bytes::<64>().to_vec();
- let hash =
- crate::crypto::hash_password(password.as_bytes(), &salt, PASSWORD_ITER as u32);
- self.password_salt = Some(salt);
- self.password_hash = Some(hash);
- } else {
- self.password_iter = None;
- self.password_salt = None;
- self.password_hash = None;
- }
- }
-
- pub fn check_password(&self, password: &str) -> bool {
- match (&self.password_hash, &self.password_salt, self.password_iter) {
- (Some(hash), Some(salt), Some(iter)) => {
- crate::crypto::verify_password_hash(password.as_bytes(), salt, hash, iter as u32)
- }
- _ => false,
- }
- }
-
- pub async fn creator_identifier(&self, conn: &mut DbConn) -> Option<String> {
- if let Some(hide_email) = self.hide_email {
- if hide_email {
- return None;
- }
- }
-
- if let Some(user_uuid) = &self.user_uuid {
- if let Some(user) = User::find_by_uuid(user_uuid, conn).await {
- return Some(user.email);
- }
- }
-
- None
- }
-
- pub fn to_json(&self) -> Value {
- use crate::util::format_date;
- use data_encoding::BASE64URL_NOPAD;
- use uuid::Uuid;
-
- let data: Value = serde_json::from_str(&self.data).unwrap_or_default();
-
- json!({
- "Id": self.uuid,
- "AccessId": BASE64URL_NOPAD.encode(Uuid::parse_str(&self.uuid).unwrap_or_default().as_bytes()),
- "Type": self.atype,
-
- "Name": self.name,
- "Notes": self.notes,
- "Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
- "File": if self.atype == SendType::File as i32 { Some(&data) } else { None },
-
- "Key": self.akey,
- "MaxAccessCount": self.max_access_count,
- "AccessCount": self.access_count,
- "Password": self.password_hash.as_deref().map(|h| BASE64URL_NOPAD.encode(h)),
- "Disabled": self.disabled,
- "HideEmail": self.hide_email,
-
- "RevisionDate": format_date(&self.revision_date),
- "ExpirationDate": self.expiration_date.as_ref().map(format_date),
- "DeletionDate": format_date(&self.deletion_date),
- "Object": "send",
- })
- }
-
- pub async fn to_json_access(&self, conn: &mut DbConn) -> Value {
- use crate::util::format_date;
-
- let data: Value = serde_json::from_str(&self.data).unwrap_or_default();
-
- json!({
- "Id": self.uuid,
- "Type": self.atype,
-
- "Name": self.name,
- "Text": if self.atype == SendType::Text as i32 { Some(&data) } else { None },
- "File": if self.atype == SendType::File as i32 { Some(&data) } else { None },
-
- "ExpirationDate": self.expiration_date.as_ref().map(format_date),
- "CreatorIdentifier": self.creator_identifier(conn).await,
- "Object": "send-access",
- })
- }
-}
-
-use crate::db::DbConn;
-
-use crate::api::EmptyResult;
-use crate::error::MapResult;
-
-impl Send {
- pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
- self.update_users_revision(conn).await;
- self.revision_date = Utc::now().naive_utc();
-
- db_run! { conn:
- sqlite, mysql {
- match diesel::replace_into(sends::table)
- .values(SendDb::to_db(self))
- .execute(conn)
- {
- Ok(_) => Ok(()),
- // Record already exists and causes a Foreign Key Violation because replace_into() wants to delete the record first.
- Err(diesel::result::Error::DatabaseError(diesel::result::DatabaseErrorKind::ForeignKeyViolation, _)) => {
- diesel::update(sends::table)
- .filter(sends::uuid.eq(&self.uuid))
- .set(SendDb::to_db(self))
- .execute(conn)
- .map_res("Error saving send")
- }
- Err(e) => Err(e.into()),
- }.map_res("Error saving send")
- }
- postgresql {
- let value = SendDb::to_db(self);
- diesel::insert_into(sends::table)
- .values(&value)
- .on_conflict(sends::uuid)
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error saving send")
- }
- }
- }
-
- pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
- self.update_users_revision(conn).await;
-
- if self.atype == SendType::File as i32 {
- std::fs::remove_dir_all(
- std::path::Path::new(&crate::config::Config::SENDS_FOLDER).join(&self.uuid),
- )
- .ok();
- }
-
- db_run! { conn: {
- diesel::delete(sends::table.filter(sends::uuid.eq(&self.uuid)))
- .execute(conn)
- .map_res("Error deleting send")
- }}
- }
-
- /// Purge all sends that are past their deletion date.
- pub async fn purge(conn: &mut DbConn) {
- for send in Self::find_by_past_deletion_date(conn).await {
- send.delete(conn).await.ok();
- }
- }
-
- pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<String> {
- let mut user_uuids = Vec::new();
- match &self.user_uuid {
- Some(user_uuid) => {
- User::update_uuid_revision(user_uuid, conn).await;
- user_uuids.push(user_uuid.clone())
- }
- None => {
- // Belongs to Organization, not implemented
- }
- };
- user_uuids
- }
-
- pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
- for send in Self::find_by_user(user_uuid, conn).await {
- send.delete(conn).await?;
- }
- Ok(())
- }
-
- pub async fn find_by_access_id(access_id: &str, conn: &mut DbConn) -> Option<Self> {
- use data_encoding::BASE64URL_NOPAD;
- use uuid::Uuid;
-
- let uuid_vec = match BASE64URL_NOPAD.decode(access_id.as_bytes()) {
- Ok(v) => v,
- Err(_) => return None,
- };
-
- let uuid = match Uuid::from_slice(&uuid_vec) {
- Ok(u) => u.to_string(),
- Err(_) => return None,
- };
-
- Self::find_by_uuid(&uuid, conn).await
- }
-
- pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
- db_run! {conn: {
- sends::table
- .filter(sends::uuid.eq(uuid))
- .first::<SendDb>(conn)
- .ok()
- .from_db()
- }}
- }
-
- pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! {conn: {
- sends::table
- .filter(sends::user_uuid.eq(user_uuid))
- .load::<SendDb>(conn).expect("Error loading sends").from_db()
- }}
- }
-
- pub async fn find_by_org(org_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
- db_run! {conn: {
- sends::table
- .filter(sends::organization_uuid.eq(org_uuid))
- .load::<SendDb>(conn).expect("Error loading sends").from_db()
- }}
- }
-
- pub async fn find_by_past_deletion_date(conn: &mut DbConn) -> Vec<Self> {
- let now = Utc::now().naive_utc();
- db_run! {conn: {
- sends::table
- .filter(sends::deletion_date.lt(now))
- .load::<SendDb>(conn).expect("Error loading sends").from_db()
- }}
- }
-}
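For reference, a Send's access id — per the deleted find_by_access_id and to_json above — is the unpadded base64url encoding of the UUID's 16 raw bytes. A minimal, self-contained round trip of that encoding, using the same data_encoding and uuid crates (the example UUID is arbitrary):

    use data_encoding::BASE64URL_NOPAD;
    use uuid::Uuid;

    fn access_id_round_trip() {
        let uuid = Uuid::parse_str("936DA01F-9ABD-4D9D-80C7-02AF85C822A8").unwrap();
        // Encode the 16 raw UUID bytes as a 22-character unpadded base64url string.
        let access_id = BASE64URL_NOPAD.encode(uuid.as_bytes());
        // Decode back: reject anything that is not valid base64url or not 16 bytes.
        let bytes = BASE64URL_NOPAD.decode(access_id.as_bytes()).unwrap();
        let decoded = Uuid::from_slice(&bytes).unwrap();
        assert_eq!(uuid, decoded);
    }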
diff --git a/src/db/models/two_factor.rs b/src/db/models/two_factor.rs
@@ -1,14 +1,13 @@
-use serde_json::Value;
-
use crate::{api::EmptyResult, db::DbConn, error::MapResult};
+use serde_json::Value;
db_object! {
#[derive(Identifiable, Queryable, Insertable, AsChangeset)]
#[diesel(table_name = twofactor)]
#[diesel(primary_key(uuid))]
pub struct TwoFactor {
- pub uuid: String,
- pub user_uuid: String,
+ uuid: String,
+ user_uuid: String,
pub atype: i32,
pub enabled: bool,
pub data: String,
@@ -23,8 +22,6 @@ pub enum TwoFactorType {
Webauthn = 7,
WebauthnRegisterChallenge = 1003,
WebauthnLoginChallenge = 1004,
-
- // Special type for Protected Actions verification via email
ProtectedActions = 2000,
}
@@ -41,14 +38,6 @@ impl TwoFactor {
}
}
- pub fn to_json(&self) -> Value {
- json!({
- "Enabled": self.enabled,
- "Key": "", // This key and value vary
- "Object": "twoFactorAuthenticator" // This value varies
- })
- }
-
pub fn to_json_provider(&self) -> Value {
json!({
"Enabled": self.enabled,
@@ -60,9 +49,9 @@ impl TwoFactor {
/// Database methods
impl TwoFactor {
- pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn save(&self, conn: &DbConn) -> EmptyResult {
db_run! { conn:
- sqlite, mysql {
+ {
match diesel::replace_into(twofactor::table)
.values(TwoFactorDb::to_db(self))
.execute(conn)
@@ -79,27 +68,10 @@ impl TwoFactor {
Err(e) => Err(e.into()),
}.map_res("Error saving twofactor")
}
- postgresql {
- let value = TwoFactorDb::to_db(self);
- // We need to make sure we're not going to violate the unique constraint on user_uuid and atype.
- // This happens automatically on other DBMS backends due to replace_into(). PostgreSQL does
- // not support multiple constraints on ON CONFLICT clauses.
- diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(&self.user_uuid)).filter(twofactor::atype.eq(&self.atype)))
- .execute(conn)
- .map_res("Error deleting twofactor for insert")?;
-
- diesel::insert_into(twofactor::table)
- .values(&value)
- .on_conflict(twofactor::uuid)
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error saving twofactor")
- }
}
}
- pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete(self, conn: &DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(twofactor::table.filter(twofactor::uuid.eq(self.uuid)))
.execute(conn)
@@ -107,7 +79,7 @@ impl TwoFactor {
}}
}
- pub async fn find_by_user(user_uuid: &str, conn: &mut DbConn) -> Vec<Self> {
+ pub async fn find_by_user(user_uuid: &str, conn: &DbConn) -> Vec<Self> {
db_run! { conn: {
twofactor::table
.filter(twofactor::user_uuid.eq(user_uuid))
@@ -118,11 +90,7 @@ impl TwoFactor {
}}
}
- pub async fn find_by_user_and_type(
- user_uuid: &str,
- atype: i32,
- conn: &mut DbConn,
- ) -> Option<Self> {
+ pub async fn find_by_user_and_type(user_uuid: &str, atype: i32, conn: &DbConn) -> Option<Self> {
db_run! { conn: {
twofactor::table
.filter(twofactor::user_uuid.eq(user_uuid))
@@ -133,7 +101,7 @@ impl TwoFactor {
}}
}
- pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete_all_by_user(user_uuid: &str, conn: &DbConn) -> EmptyResult {
db_run! { conn: {
diesel::delete(twofactor::table.filter(twofactor::user_uuid.eq(user_uuid)))
.execute(conn)
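The save above (like Send::save before it and User::save below) keeps the SQLite upsert idiom: try replace_into, and if the delete-then-insert it performs trips a foreign-key reference to the existing row, fall back to a plain UPDATE. The skeleton of that flow, abstracted from the diesel query builders above — `try_replace` and `try_update` are hypothetical stand-ins for the replace_into and update calls:

    use diesel::result::{DatabaseErrorKind, Error as DieselError};

    fn upsert(
        try_replace: impl Fn() -> Result<usize, DieselError>,
        try_update: impl Fn() -> Result<usize, DieselError>,
    ) -> Result<(), DieselError> {
        match try_replace() {
            Ok(_) => Ok(()),
            // replace_into() deletes the old row first; a child row referencing it
            // makes that delete fail, so update the existing row in place instead.
            Err(DieselError::DatabaseError(DatabaseErrorKind::ForeignKeyViolation, _)) => {
                try_update().map(|_| ())
            }
            Err(e) => Err(e),
        }
    }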
diff --git a/src/db/models/two_factor_incomplete.rs b/src/db/models/two_factor_incomplete.rs
@@ -1,71 +0,0 @@
-use crate::{api::EmptyResult, db::DbConn, error::MapResult};
-use chrono::NaiveDateTime;
-
-db_object! {
- #[derive(Identifiable, Queryable, Insertable, AsChangeset)]
- #[diesel(table_name = twofactor_incomplete)]
- #[diesel(primary_key(user_uuid, device_uuid))]
- pub struct TwoFactorIncomplete {
- pub user_uuid: String,
- // This device UUID is simply what's claimed by the device. It doesn't
- // necessarily correspond to any UUID in the devices table, since a device
- // must complete 2FA login before being added into the devices table.
- pub device_uuid: String,
- pub device_name: String,
- pub login_time: NaiveDateTime,
- pub ip_address: String,
- }
-}
-
-impl TwoFactorIncomplete {
- pub async fn find_by_user_and_device(
- user_uuid: &str,
- device_uuid: &str,
- conn: &mut DbConn,
- ) -> Option<Self> {
- db_run! { conn: {
- twofactor_incomplete::table
- .filter(twofactor_incomplete::user_uuid.eq(user_uuid))
- .filter(twofactor_incomplete::device_uuid.eq(device_uuid))
- .first::<TwoFactorIncompleteDb>(conn)
- .ok()
- .from_db()
- }}
- }
-
- pub async fn find_logins_before(dt: &NaiveDateTime, conn: &mut DbConn) -> Vec<Self> {
- db_run! {conn: {
- twofactor_incomplete::table
- .filter(twofactor_incomplete::login_time.lt(dt))
- .load::<TwoFactorIncompleteDb>(conn)
- .expect("Error loading twofactor_incomplete")
- .from_db()
- }}
- }
-
- pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
- Self::delete_by_user_and_device(&self.user_uuid, &self.device_uuid, conn).await
- }
-
- pub async fn delete_by_user_and_device(
- user_uuid: &str,
- device_uuid: &str,
- conn: &mut DbConn,
- ) -> EmptyResult {
- db_run! { conn: {
- diesel::delete(twofactor_incomplete::table
- .filter(twofactor_incomplete::user_uuid.eq(user_uuid))
- .filter(twofactor_incomplete::device_uuid.eq(device_uuid)))
- .execute(conn)
- .map_res("Error in twofactor_incomplete::delete_by_user_and_device()")
- }}
- }
-
- pub async fn delete_all_by_user(user_uuid: &str, conn: &mut DbConn) -> EmptyResult {
- db_run! { conn: {
- diesel::delete(twofactor_incomplete::table.filter(twofactor_incomplete::user_uuid.eq(user_uuid)))
- .execute(conn)
- .map_res("Error in twofactor_incomplete::delete_all_by_user()")
- }}
- }
-}
diff --git a/src/db/models/user.rs b/src/db/models/user.rs
@@ -17,49 +17,32 @@ db_object! {
pub verified_at: Option<NaiveDateTime>,
pub last_verifying_at: Option<NaiveDateTime>,
pub login_verify_count: i32,
-
pub email: String,
pub email_new: Option<String>,
pub email_new_token: Option<String>,
pub name: String,
-
pub password_hash: Vec<u8>,
pub salt: Vec<u8>,
pub password_iterations: i32,
pub password_hint: Option<String>,
-
pub akey: String,
pub private_key: Option<String>,
pub public_key: Option<String>,
-
#[diesel(column_name = "totp_secret")] // Note, this is only added to the UserDb structs, not to User
_totp_secret: Option<String>,
pub totp_recover: Option<String>,
-
pub security_stamp: String,
pub stamp_exception: Option<String>,
-
pub equivalent_domains: String,
pub excluded_globals: String,
-
pub client_kdf_type: i32,
pub client_kdf_iter: i32,
pub client_kdf_memory: Option<i32>,
pub client_kdf_parallelism: Option<i32>,
-
pub api_key: Option<String>,
-
pub avatar_color: Option<String>,
-
pub external_id: Option<String>, // Todo: Needs to be removed in the future, this is not used anymore.
}
-
- #[derive(Identifiable, Queryable, Insertable)]
- #[diesel(table_name = invitations)]
- #[diesel(primary_key(email))]
- pub struct Invitation {
- pub email: String,
- }
}
pub enum UserKdfType {
@@ -105,7 +88,7 @@ impl User {
password_hash: Vec::new(),
salt: crypto::get_random_bytes::<64>().to_vec(),
- password_iterations: config::get_config().password_iterations,
+ password_iterations: config::get_config().password_iterations as i32,
security_stamp: crate::util::get_uuid(),
stamp_exception: None,
@@ -138,16 +121,15 @@ impl User {
password.as_bytes(),
&self.salt,
&self.password_hash,
- self.password_iterations as u32,
+ u32::try_from(self.password_iterations)
+ .expect("underflow converting password iterations into a u32"),
)
}
pub fn check_valid_recovery_code(&self, recovery_code: &str) -> bool {
- if let Some(ref totp_recover) = self.totp_recover {
+ self.totp_recover.as_ref().map_or(false, |totp_recover| {
crate::crypto::ct_eq(recovery_code, totp_recover.to_lowercase())
- } else {
- false
- }
+ })
}
pub fn check_valid_api_key(&self, key: &str) -> bool {
@@ -175,7 +157,8 @@ impl User {
self.password_hash = crypto::hash_password(
password.as_bytes(),
&self.salt,
- self.password_iterations as u32,
+ u32::try_from(self.password_iterations)
+ .expect("underflow converting password iterations into a u32"),
);
if let Some(route) = allow_next_route {
@@ -187,7 +170,7 @@ impl User {
}
if reset_security_stamp {
- self.reset_security_stamp()
+ self.reset_security_stamp();
}
}
@@ -217,18 +200,14 @@ impl User {
}
}
-use super::{
- Cipher, Device, EmergencyAccess, Favorite, Folder, Send, TwoFactor, TwoFactorIncomplete,
- UserOrgType, UserOrganization,
-};
-use crate::db::DbConn;
-
+use super::{Cipher, Device, Favorite, Folder, TwoFactor, UserOrgType, UserOrganization};
use crate::api::EmptyResult;
+use crate::db::DbConn;
use crate::error::MapResult;
/// Database methods
impl User {
- pub async fn to_json(&self, conn: &mut DbConn) -> Value {
+ pub async fn to_json(&self, conn: &DbConn) -> Value {
let mut orgs_json = Vec::new();
for c in UserOrganization::find_confirmed_by_user(&self.uuid, conn).await {
orgs_json.push(c.to_json(conn).await);
@@ -265,7 +244,7 @@ impl User {
})
}
- pub async fn save(&mut self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn save(&mut self, conn: &DbConn) -> EmptyResult {
if self.email.trim().is_empty() {
err!("User email can't be empty")
}
@@ -273,7 +252,7 @@ impl User {
self.updated_at = Utc::now().naive_utc();
db_run! {conn:
- sqlite, mysql {
+ {
match diesel::replace_into(users::table)
.values(UserDb::to_db(self))
.execute(conn)
@@ -290,20 +269,10 @@ impl User {
Err(e) => Err(e.into()),
}.map_res("Error saving user")
}
- postgresql {
- let value = UserDb::to_db(self);
- diesel::insert_into(users::table) // Insert or update
- .values(&value)
- .on_conflict(users::uuid)
- .do_update()
- .set(&value)
- .execute(conn)
- .map_res("Error saving user")
- }
}
}
- pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn delete(self, conn: &DbConn) -> EmptyResult {
for user_org in UserOrganization::find_confirmed_by_user(&self.uuid, conn).await {
if user_org.atype == UserOrgType::Owner
&& UserOrganization::count_confirmed_by_org_and_type(
@@ -317,17 +286,12 @@ impl User {
err!("Can't delete last owner")
}
}
-
- Send::delete_all_by_user(&self.uuid, conn).await?;
- EmergencyAccess::delete_all_by_user(&self.uuid, conn).await?;
UserOrganization::delete_all_by_user(&self.uuid, conn).await?;
Cipher::delete_all_by_user(&self.uuid, conn).await?;
Favorite::delete_all_by_user(&self.uuid, conn).await?;
Folder::delete_all_by_user(&self.uuid, conn).await?;
Device::delete_all_by_user(&self.uuid, conn).await?;
TwoFactor::delete_all_by_user(&self.uuid, conn).await?;
- TwoFactorIncomplete::delete_all_by_user(&self.uuid, conn).await?;
- Invitation::take(&self.email, conn).await; // Delete invitation if any
db_run! {conn: {
diesel::delete(users::table.filter(users::uuid.eq(self.uuid)))
@@ -336,13 +300,13 @@ impl User {
}}
}
- pub async fn update_uuid_revision(uuid: &str, conn: &mut DbConn) {
+ pub async fn update_uuid_revision(uuid: &str, conn: &DbConn) {
if let Err(e) = Self::_update_revision(uuid, &Utc::now().naive_utc(), conn).await {
warn!("Failed to update revision for {}: {:#?}", uuid, e);
}
}
- pub async fn update_all_revisions(conn: &mut DbConn) -> EmptyResult {
+ pub async fn update_all_revisions(conn: &DbConn) -> EmptyResult {
let updated_at = Utc::now().naive_utc();
db_run! {conn: {
@@ -355,13 +319,13 @@ impl User {
}}
}
- pub async fn update_revision(&mut self, conn: &mut DbConn) -> EmptyResult {
+ pub async fn update_revision(&mut self, conn: &DbConn) -> EmptyResult {
self.updated_at = Utc::now().naive_utc();
Self::_update_revision(&self.uuid, &self.updated_at, conn).await
}
- async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &mut DbConn) -> EmptyResult {
+ async fn _update_revision(uuid: &str, date: &NaiveDateTime, conn: &DbConn) -> EmptyResult {
db_run! {conn: {
crate::util::retry(|| {
diesel::update(users::table.filter(users::uuid.eq(uuid)))
@@ -372,7 +336,7 @@ impl User {
}}
}
- pub async fn find_by_mail(mail: &str, conn: &mut DbConn) -> Option<Self> {
+ pub async fn find_by_mail(mail: &str, conn: &DbConn) -> Option<Self> {
let lower_mail = mail.to_lowercase();
db_run! {conn: {
users::table
@@ -383,80 +347,22 @@ impl User {
}}
}
- pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> {
+ pub async fn find_by_uuid(uuid: &str, conn: &DbConn) -> Option<Self> {
db_run! {conn: {
users::table.filter(users::uuid.eq(uuid)).first::<UserDb>(conn).ok().from_db()
}}
}
- pub async fn get_all(conn: &mut DbConn) -> Vec<Self> {
+ pub async fn get_all(conn: &DbConn) -> Vec<Self> {
db_run! {conn: {
users::table.load::<UserDb>(conn).expect("Error loading users").from_db()
}}
}
- pub async fn last_active(&self, conn: &mut DbConn) -> Option<NaiveDateTime> {
+ pub async fn last_active(&self, conn: &DbConn) -> Option<NaiveDateTime> {
match Device::find_latest_active_by_user(&self.uuid, conn).await {
Some(device) => Some(device.updated_at),
None => None,
}
}
}
-
-impl Invitation {
- pub fn new(email: &str) -> Self {
- let email = email.to_lowercase();
- Self { email }
- }
-
- pub async fn save(&self, conn: &mut DbConn) -> EmptyResult {
- if self.email.trim().is_empty() {
- err!("Invitation email can't be empty")
- }
-
- db_run! {conn:
- sqlite, mysql {
- // Not checking for ForeignKey Constraints here
- // Table invitations does not have any ForeignKey Constraints.
- diesel::replace_into(invitations::table)
- .values(InvitationDb::to_db(self))
- .execute(conn)
- .map_res("Error saving invitation")
- }
- postgresql {
- diesel::insert_into(invitations::table)
- .values(InvitationDb::to_db(self))
- .on_conflict(invitations::email)
- .do_nothing()
- .execute(conn)
- .map_res("Error saving invitation")
- }
- }
- }
-
- pub async fn delete(self, conn: &mut DbConn) -> EmptyResult {
- db_run! {conn: {
- diesel::delete(invitations::table.filter(invitations::email.eq(self.email)))
- .execute(conn)
- .map_res("Error deleting invitation")
- }}
- }
-
- pub async fn find_by_mail(mail: &str, conn: &mut DbConn) -> Option<Self> {
- let lower_mail = mail.to_lowercase();
- db_run! {conn: {
- invitations::table
- .filter(invitations::email.eq(lower_mail))
- .first::<InvitationDb>(conn)
- .ok()
- .from_db()
- }}
- }
-
- pub async fn take(mail: &str, conn: &mut DbConn) -> bool {
- match Self::find_by_mail(mail, conn).await {
- Some(invitation) => invitation.delete(conn).await.is_ok(),
- None => false,
- }
- }
-}
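One recurring change in user.rs above is worth spelling out: `self.password_iterations as u32` silently wraps if the stored i32 is ever negative, while `u32::try_from` makes that case fail loudly. A minimal illustration:

    fn main() {
        let stored: i32 = 600_000;
        // `as` is lossy: (-1_i32) as u32 silently becomes 4_294_967_295.
        let _lossy = stored as u32;
        // `try_from` is checked: a negative value errors instead of wrapping.
        let checked = u32::try_from(stored)
            .expect("underflow converting password iterations into a u32");
        assert_eq!(checked, 600_000);
    }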
diff --git a/src/db/schemas/sqlite/schema.rs b/src/db/schemas/sqlite/schema.rs
@@ -1,10 +1,20 @@
table! {
- attachments (id) {
- id -> Text,
- cipher_uuid -> Text,
- file_name -> Text,
- file_size -> Integer,
- akey -> Nullable<Text>,
+ auth_requests (uuid) {
+ uuid -> Text,
+ user_uuid -> Text,
+ organization_uuid -> Nullable<Text>,
+ request_device_identifier -> Text,
+ device_type -> Integer,
+ request_ip -> Text,
+ response_device_id -> Nullable<Text>,
+ access_code -> Text,
+ public_key -> Text,
+ enc_key -> Nullable<Text>,
+ master_password_hash -> Nullable<Text>,
+ approved -> Nullable<Bool>,
+ creation_date -> Timestamp,
+ response_date -> Nullable<Timestamp>,
+ authentication_date -> Nullable<Timestamp>,
}
}
@@ -26,7 +36,6 @@ table! {
reprompt -> Nullable<Integer>,
}
}
-
table! {
ciphers_collections (cipher_uuid, collection_uuid) {
cipher_uuid -> Text,
@@ -42,7 +51,6 @@ table! {
external_id -> Nullable<Text>,
}
}
-
table! {
devices (uuid, user_uuid) {
uuid -> Text,
@@ -59,27 +67,6 @@ table! {
}
table! {
- event (uuid) {
- uuid -> Text,
- event_type -> Integer,
- user_uuid -> Nullable<Text>,
- org_uuid -> Nullable<Text>,
- cipher_uuid -> Nullable<Text>,
- collection_uuid -> Nullable<Text>,
- group_uuid -> Nullable<Text>,
- org_user_uuid -> Nullable<Text>,
- act_user_uuid -> Nullable<Text>,
- device_type -> Nullable<Integer>,
- ip_address -> Nullable<Text>,
- event_date -> Timestamp,
- policy_uuid -> Nullable<Text>,
- provider_uuid -> Nullable<Text>,
- provider_user_uuid -> Nullable<Text>,
- provider_org_uuid -> Nullable<Text>,
- }
-}
-
-table! {
favorites (user_uuid, cipher_uuid) {
user_uuid -> Text,
cipher_uuid -> Text,
@@ -118,38 +105,23 @@ table! {
data -> Text,
}
}
-
table! {
- organizations (uuid) {
+ organization_api_key (uuid, org_uuid) {
uuid -> Text,
- name -> Text,
- billing_email -> Text,
- private_key -> Nullable<Text>,
- public_key -> Nullable<Text>,
+ org_uuid -> Text,
+ atype -> Integer,
+ api_key -> Text,
+ revision_date -> Timestamp,
}
}
table! {
- sends (uuid) {
+ organizations (uuid) {
uuid -> Text,
- user_uuid -> Nullable<Text>,
- organization_uuid -> Nullable<Text>,
name -> Text,
- notes -> Nullable<Text>,
- atype -> Integer,
- data -> Text,
- akey -> Text,
- password_hash -> Nullable<Binary>,
- password_salt -> Nullable<Binary>,
- password_iter -> Nullable<Integer>,
- max_access_count -> Nullable<Integer>,
- access_count -> Integer,
- creation_date -> Timestamp,
- revision_date -> Timestamp,
- expiration_date -> Nullable<Timestamp>,
- deletion_date -> Timestamp,
- disabled -> Bool,
- hide_email -> Nullable<Bool>,
+ billing_email -> Text,
+ private_key -> Nullable<Text>,
+ public_key -> Nullable<Text>,
}
}
@@ -165,16 +137,6 @@ table! {
}
table! {
- twofactor_incomplete (user_uuid, device_uuid) {
- user_uuid -> Text,
- device_uuid -> Text,
- device_name -> Text,
- login_time -> Timestamp,
- ip_address -> Text,
- }
-}
-
-table! {
users (uuid) {
uuid -> Text,
enabled -> Bool,
@@ -233,131 +195,15 @@ table! {
}
}
-table! {
- organization_api_key (uuid, org_uuid) {
- uuid -> Text,
- org_uuid -> Text,
- atype -> Integer,
- api_key -> Text,
- revision_date -> Timestamp,
- }
-}
-
-table! {
- emergency_access (uuid) {
- uuid -> Text,
- grantor_uuid -> Text,
- grantee_uuid -> Nullable<Text>,
- email -> Nullable<Text>,
- key_encrypted -> Nullable<Text>,
- atype -> Integer,
- status -> Integer,
- wait_time_days -> Integer,
- recovery_initiated_at -> Nullable<Timestamp>,
- last_notification_at -> Nullable<Timestamp>,
- updated_at -> Timestamp,
- created_at -> Timestamp,
- }
-}
-
-table! {
- groups (uuid) {
- uuid -> Text,
- organizations_uuid -> Text,
- name -> Text,
- access_all -> Bool,
- external_id -> Nullable<Text>,
- creation_date -> Timestamp,
- revision_date -> Timestamp,
- }
-}
-
-table! {
- groups_users (groups_uuid, users_organizations_uuid) {
- groups_uuid -> Text,
- users_organizations_uuid -> Text,
- }
-}
-
-table! {
- collections_groups (collections_uuid, groups_uuid) {
- collections_uuid -> Text,
- groups_uuid -> Text,
- read_only -> Bool,
- hide_passwords -> Bool,
- }
-}
-
-table! {
- auth_requests (uuid) {
- uuid -> Text,
- user_uuid -> Text,
- organization_uuid -> Nullable<Text>,
- request_device_identifier -> Text,
- device_type -> Integer,
- request_ip -> Text,
- response_device_id -> Nullable<Text>,
- access_code -> Text,
- public_key -> Text,
- enc_key -> Nullable<Text>,
- master_password_hash -> Nullable<Text>,
- approved -> Nullable<Bool>,
- creation_date -> Timestamp,
- response_date -> Nullable<Timestamp>,
- authentication_date -> Nullable<Timestamp>,
- }
-}
-
-joinable!(attachments -> ciphers (cipher_uuid));
-joinable!(ciphers -> organizations (organization_uuid));
-joinable!(ciphers -> users (user_uuid));
-joinable!(ciphers_collections -> ciphers (cipher_uuid));
-joinable!(ciphers_collections -> collections (collection_uuid));
-joinable!(collections -> organizations (org_uuid));
-joinable!(devices -> users (user_uuid));
-joinable!(folders -> users (user_uuid));
joinable!(folders_ciphers -> ciphers (cipher_uuid));
joinable!(folders_ciphers -> folders (folder_uuid));
-joinable!(org_policies -> organizations (org_uuid));
-joinable!(sends -> organizations (organization_uuid));
-joinable!(sends -> users (user_uuid));
-joinable!(twofactor -> users (user_uuid));
-joinable!(users_collections -> collections (collection_uuid));
-joinable!(users_collections -> users (user_uuid));
-joinable!(users_organizations -> organizations (org_uuid));
-joinable!(users_organizations -> users (user_uuid));
-joinable!(users_organizations -> ciphers (org_uuid));
-joinable!(organization_api_key -> organizations (org_uuid));
-joinable!(emergency_access -> users (grantor_uuid));
-joinable!(groups -> organizations (organizations_uuid));
-joinable!(groups_users -> users_organizations (users_organizations_uuid));
-joinable!(groups_users -> groups (groups_uuid));
-joinable!(collections_groups -> collections (collections_uuid));
-joinable!(collections_groups -> groups (groups_uuid));
-joinable!(event -> users_organizations (uuid));
-joinable!(auth_requests -> users (user_uuid));
-
allow_tables_to_appear_in_same_query!(
- attachments,
ciphers,
ciphers_collections,
collections,
- devices,
folders,
folders_ciphers,
- invitations,
org_policies,
- organizations,
- sends,
- twofactor,
- users,
users_collections,
users_organizations,
- organization_api_key,
- emergency_access,
- groups,
- groups_users,
- collections_groups,
- event,
- auth_requests,
);
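For orientation, each table! block above is diesel's schema DSL: the parenthesized columns name the primary key, and each column maps to a Rust type through its SQL type. A minimal pairing with a Queryable struct — field order must match the declared column order (this struct is illustrative, not the one db_object! generates):

    #[macro_use]
    extern crate diesel;

    table! {
        organizations (uuid) {
            uuid -> Text,
            name -> Text,
            billing_email -> Text,
            private_key -> Nullable<Text>,
            public_key -> Nullable<Text>,
        }
    }

    #[derive(Queryable)]
    struct Organization {
        uuid: String,
        name: String,
        billing_email: String,
        private_key: Option<String>,
        public_key: Option<String>,
    }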
diff --git a/src/error.rs b/src/error.rs
@@ -1,25 +1,20 @@
//
// Error generator macro
//
-use crate::db::models::EventType;
use std::error::Error as StdError;
macro_rules! make_error {
( $( $name:ident ( $ty:ty ): $src_fn:expr, $usr_msg_fun:expr ),+ $(,)? ) => {
const BAD_REQUEST: u16 = 400;
-
- pub enum ErrorKind { $($name( $ty )),+ }
-
- #[derive(Debug)]
- pub struct ErrorEvent { pub event: EventType }
- pub struct Error { message: String, error: ErrorKind, error_code: u16, event: Option<ErrorEvent> }
+ enum ErrorKind { $($name( $ty )),+ }
+ pub struct Error { message: String, error: ErrorKind, error_code: u16 }
$(impl From<$ty> for Error {
fn from(err: $ty) -> Self { Error::from((stringify!($name), err)) }
})+
$(impl<S: Into<String>> From<(S, $ty)> for Error {
fn from(val: (S, $ty)) -> Self {
- Error { message: val.0.into(), error: ErrorKind::$name(val.1), error_code: BAD_REQUEST, event: None }
+ Error { message: val.0.into(), error: ErrorKind::$name(val.1), error_code: BAD_REQUEST }
}
})+
impl StdError for Error {
@@ -54,7 +49,7 @@ use tokio_tungstenite::tungstenite::Error as TungstError;
use webauthn_rs::error::WebauthnError as WebauthnErr;
#[derive(Serialize)]
-pub struct Empty {}
+struct Empty;
// Error struct
// Contains a String error message, meant for the user and an enum variant, with an error of different types.
@@ -146,32 +141,20 @@ impl Error {
pub fn new<M: Into<String>, N: Into<String>>(usr_msg: M, log_msg: N) -> Self {
(usr_msg, log_msg.into()).into()
}
-
+ #[must_use]
pub fn empty() -> Self {
Empty {}.into()
}
-
#[must_use]
- pub fn with_msg<M: Into<String>>(mut self, msg: M) -> Self {
+ fn with_msg<M: Into<String>>(mut self, msg: M) -> Self {
self.message = msg.into();
self
}
-
#[must_use]
pub const fn with_code(mut self, code: u16) -> Self {
self.error_code = code;
self
}
-
- #[must_use]
- pub fn with_event(mut self, event: ErrorEvent) -> Self {
- self.event = Some(event);
- self
- }
-
- pub fn get_event(&self) -> &Option<ErrorEvent> {
- &self.event
- }
}
pub trait MapResult<S> {
@@ -195,7 +178,7 @@ impl<S> MapResult<S> for Option<S> {
self.ok_or_else(|| Error::new(msg, ""))
}
}
-
+#[allow(clippy::unnecessary_wraps)]
const fn _has_source<T>(e: T) -> Option<T> {
Some(e)
}
@@ -228,20 +211,13 @@ fn _api_error(_: &impl std::any::Any, msg: &str) -> String {
//
// Rocket responder impl
//
-use std::io::Cursor;
-
use rocket::http::{ContentType, Status};
use rocket::request::Request;
use rocket::response::{self, Responder, Response};
+use std::io::Cursor;
impl<'r> Responder<'r, 'static> for Error {
fn respond_to(self, _: &Request<'_>) -> response::Result<'static> {
- match self.error {
- ErrorKind::Empty(_) => {} // Don't print the error in this situation
- ErrorKind::Simple(_) => {} // Don't print the error in this situation
- _ => {}
- };
-
let code = Status::from_code(self.error_code).unwrap_or(Status::BadRequest);
let body = self.to_string();
Response::build()
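The surviving make_error! above generates, per wrapped error type, an ErrorKind variant plus a From impl, so `?` converts library errors into the crate's Error. A scaled-down, self-contained model of that expansion (the real macro also threads the source- and user-message functions, which this omits):

    macro_rules! make_error {
        ( $( $name:ident ( $ty:ty ) ),+ $(,)? ) => {
            const BAD_REQUEST: u16 = 400;
            enum ErrorKind { $( $name($ty) ),+ }
            pub struct Error { message: String, error: ErrorKind, error_code: u16 }
            $( impl From<$ty> for Error {
                fn from(err: $ty) -> Self {
                    Error { message: String::new(), error: ErrorKind::$name(err), error_code: BAD_REQUEST }
                }
            } )+
        };
    }

    make_error! {
        Io(std::io::Error),
        ParseInt(std::num::ParseIntError),
    }

    // `?` now converts either wrapped error type into `Error` automatically:
    fn parse(s: &str) -> Result<u16, Error> {
        Ok(s.parse::<u16>()?)
    }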
diff --git a/src/main.rs b/src/main.rs
@@ -1,79 +1,115 @@
-#![forbid(unsafe_code, non_ascii_idents)]
#![deny(
- rust_2018_idioms,
- rust_2021_compatibility,
- noop_method_call,
- pointer_structural_match,
- trivial_casts,
- trivial_numeric_casts,
- unused_import_braces,
- clippy::cast_lossless,
- clippy::clone_on_ref_ptr,
- clippy::equatable_if_let,
- clippy::float_cmp_const,
- clippy::inefficient_to_string,
- clippy::iter_on_empty_collections,
- clippy::iter_on_single_items,
- clippy::linkedlist,
- clippy::macro_use_imports,
- clippy::manual_assert,
- clippy::manual_instant_elapsed,
- clippy::manual_string_new,
- clippy::match_wildcard_for_single_variants,
- clippy::mem_forget,
- clippy::string_add_assign,
- clippy::string_to_string,
- clippy::unnecessary_join,
- clippy::unnecessary_self_imports,
- clippy::unused_async,
- clippy::verbose_file_reads,
- clippy::zero_sized_map_values
+ unsafe_code,
+ unused,
+ warnings,
+ clippy::all,
+ clippy::cargo,
+ clippy::complexity,
+ clippy::correctness,
+ clippy::nursery,
+ clippy::pedantic,
+ clippy::perf,
+ clippy::restriction,
+ clippy::style,
+ clippy::suspicious
+)]
+#![allow(
+ clippy::absolute_paths,
+ clippy::arithmetic_side_effects,
+ clippy::as_conversions,
+ clippy::big_endian_bytes,
+ clippy::blanket_clippy_restriction_lints,
+ clippy::default_numeric_fallback,
+ clippy::doc_markdown,
+ clippy::else_if_without_else,
+ clippy::error_impl_error,
+ clippy::expect_used,
+ clippy::if_then_some_else_none,
+ clippy::implicit_return,
+ clippy::indexing_slicing,
+ clippy::integer_division,
+ clippy::items_after_statements,
+ clippy::let_underscore_must_use,
+ clippy::let_underscore_untyped,
+ clippy::map_err_ignore,
+ clippy::min_ident_chars,
+ clippy::missing_docs_in_private_items,
+ clippy::missing_errors_doc,
+ clippy::missing_trait_methods,
+ clippy::mod_module_files,
+ clippy::module_name_repetitions,
+ clippy::multiple_crate_versions,
+ clippy::multiple_inherent_impl,
+ clippy::multiple_unsafe_ops_per_block,
+ clippy::needless_pass_by_value,
+ clippy::no_effect_underscore_binding,
+ clippy::panic,
+ clippy::panic_in_result_fn,
+ clippy::partial_pub_fields,
+ clippy::pattern_type_mismatch,
+ clippy::pub_use,
+ clippy::question_mark_used,
+ clippy::redundant_type_annotations,
+ clippy::ref_patterns,
+ clippy::shadow_reuse,
+ clippy::shadow_unrelated,
+ clippy::significant_drop_in_scrutinee,
+ clippy::significant_drop_tightening,
+ clippy::single_call_fn,
+ clippy::single_char_lifetime_names,
+ clippy::std_instead_of_alloc,
+ clippy::std_instead_of_core,
+ clippy::string_add,
+ clippy::string_slice,
+ clippy::str_to_string,
+ clippy::too_many_lines,
+ clippy::unreachable,
+ clippy::unseparated_literal_suffix,
+ clippy::unwrap_in_result,
+ clippy::unwrap_used,
+ clippy::used_underscore_binding,
+ clippy::wildcard_enum_match_arm
)]
// The recursion_limit is mainly triggered by the json!() macro.
// The more key/value pairs there are, the more recursion occurs.
// We want to keep this as low as possible, but not higher than 128.
// If you go above 128, it will cause rust-analyzer to fail.
#![recursion_limit = "103"]
-use priv_sep::unveil_create_read_write;
-
-#[macro_use]
-extern crate rocket;
-#[macro_use]
-extern crate serde;
-#[macro_use]
-extern crate serde_json;
+extern crate alloc;
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate diesel_migrations;
-
-use std::{fs::create_dir_all, path::Path, process::exit};
#[macro_use]
mod error;
+#[macro_use]
+extern crate rocket;
+#[macro_use]
+extern crate serde;
+#[macro_use]
+extern crate serde_json;
mod api;
mod auth;
mod config;
mod crypto;
-#[macro_use]
mod db;
mod priv_sep;
mod util;
+use alloc::sync::Arc;
use config::Config;
pub use error::{Error, MapResult};
+use priv_sep::unveil_create_read_write;
use std::env;
use std::path::PathBuf;
-use std::sync::Arc;
+use std::{fs::create_dir_all, path::Path, process::exit};
use tokio::runtime::Builder;
fn main() -> Result<(), Error> {
let mut promises = priv_sep::pledge_init()?;
- let mut cur_dir = env::current_dir()?;
+ let cur_dir = env::current_dir()?;
priv_sep::unveil_read(cur_dir.as_path())?;
- static_init(&mut cur_dir);
- cur_dir.push(Config::DATA_FOLDER);
- unveil_create_read_write(cur_dir)?;
+ static_init(cur_dir);
check_data_folder();
- auth::init_rsa_keys().expect("error creating or reading RSA keys");
check_web_vault();
create_dir(Config::ATTACHMENTS_FOLDER, "attachments folder");
create_dir(Config::ICON_CACHE_FOLDER, "icon cache");
@@ -104,8 +140,12 @@ fn main() -> Result<(), Error> {
)
}
#[inline]
-fn static_init(cur_dir: &mut PathBuf) {
- config::init_config(cur_dir);
+fn static_init(mut cur_dir: PathBuf) {
+ config::init_config(&mut cur_dir);
+ cur_dir.push(Config::DATA_FOLDER);
+ unveil_create_read_write(cur_dir.as_path()).unwrap_or_else(|_| {
+ panic!("unable to unveil(2) {cur_dir:?} with create, read, and write permissions",)
+ });
auth::init_values();
api::init_ws_users();
api::init_ws_anonymous_subscriptions();
@@ -118,6 +158,7 @@ fn create_dir(path: &str, description: &str) {
create_dir_all(path).expect(&err_msg);
}
+#[allow(clippy::exit)]
fn check_data_folder() {
let data_folder = Config::DATA_FOLDER;
let path = Path::new(data_folder);
@@ -128,7 +169,7 @@ fn check_data_folder() {
exit(1);
}
}
-
+#[allow(clippy::exit)]
fn check_web_vault() {
if !config::get_config().web_vault_enabled {
return;
@@ -138,19 +179,19 @@ fn check_web_vault() {
exit(1);
}
}
-#[allow(clippy::cast_lossless)]
+#[allow(clippy::as_conversions, clippy::cast_lossless, clippy::exit)]
async fn create_db_pool() -> db::DbPool {
- match util::retry_db(
+ (util::retry_db(
db::DbPool::from_config,
config::get_config().db_connection_retries.get() as u32,
)
- .await
- {
- Ok(p) => p,
- Err(_) => {
- exit(1);
- }
- }
+ .await)
+ .map_or_else(
+ |_| {
+ exit(1);
+ },
+ |p| p,
+ )
}
async fn launch_rocket(pool: db::DbPool) -> Result<(), Error> {
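create_db_pool above leans on util::retry_db to attempt pool construction several times before giving up and exiting. The general shape of such a helper, as a hedged sketch — retry_db's real signature and any backoff live in src/util.rs, not here:

    use core::future::Future;

    // Run `op` up to `attempts` times, returning the first Ok or the last Err.
    async fn retry<F, Fut, T, E>(mut op: F, attempts: u32) -> Result<T, E>
    where
        F: FnMut() -> Fut,
        Fut: Future<Output = Result<T, E>>,
    {
        let mut result = op().await;
        let mut tries = 1;
        while result.is_err() && tries < attempts {
            result = op().await;
            tries += 1;
        }
        result
    }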
@@ -172,8 +213,8 @@ async fn launch_rocket(pool: db::DbPool) -> Result<(), Error> {
.manage(pool)
.manage(api::start_notification_server())
.manage(Arc::clone(api::ws_anonymous_subscriptions()))
- .attach(util::AppHeaders())
- .attach(util::Cors())
+ .attach(util::AppHeaders)
+ .attach(util::Cors)
.ignite()
.await?;
let shutdown = instance.shutdown();
diff --git a/src/static/images/hibp.png b/src/static/images/hibp.png
Binary files differ.
diff --git a/src/static/images/mail-github.png b/src/static/images/mail-github.png
Binary files differ.
diff --git a/src/static/scripts/admin.css b/src/static/scripts/admin.css
@@ -1,56 +0,0 @@
-body {
- padding-top: 75px;
-}
-img {
- width: 48px;
- height: 48px;
-}
-.vaultwarden-icon {
- height: 32px;
- width: auto;
- margin: -5px 0 0 0;
-}
-/* Special alert-row class to use Bootstrap v5.2+ variable colors */
-.alert-row {
- --bs-alert-border: 1px solid var(--bs-alert-border-color);
- color: var(--bs-alert-color);
- background-color: var(--bs-alert-bg);
- border: var(--bs-alert-border);
-}
-
-#users-table .vw-account-details {
- min-width: 250px;
-}
-#users-table .vw-created-at, #users-table .vw-last-active {
- min-width: 85px;
- max-width: 85px;
-}
-#users-table .vw-entries, #orgs-table .vw-users, #orgs-table .vw-entries {
- min-width: 35px;
- max-width: 40px;
-}
-#orgs-table .vw-misc {
- min-width: 65px;
- max-width: 80px;
-}
-#users-table .vw-attachments, #orgs-table .vw-attachments {
- min-width: 100px;
- max-width: 130px;
-}
-#users-table .vw-actions, #orgs-table .vw-actions {
- min-width: 130px;
- max-width: 130px;
-}
-#users-table .vw-org-cell {
- max-height: 120px;
-}
-#orgs-table .vw-org-details {
- min-width: 285px;
-}
-
-#support-string {
- height: 16rem;
-}
-.vw-copy-toast {
- width: 15rem;
-}
diff --git a/src/static/scripts/admin.js b/src/static/scripts/admin.js
@@ -1,149 +0,0 @@
-"use strict";
-/* eslint-env es2017, browser */
-/* exported BASE_URL, _post */
-
-function getBaseUrl() {
- // If the base URL is `https://vaultwarden.example.com/base/path/admin/`,
- // `window.location.href` should have one of the following forms:
- //
- // - `https://vaultwarden.example.com/base/path/admin`
- // - `https://vaultwarden.example.com/base/path/admin/#/some/route[?queryParam=...]`
- //
- // We want to get to just `https://vaultwarden.example.com/base/path`.
- const pathname = window.location.pathname;
- const adminPos = pathname.indexOf("/admin");
- const newPathname = pathname.substring(0, adminPos != -1 ? adminPos : pathname.length);
- return `${window.location.origin}${newPathname}`;
-}
-const BASE_URL = getBaseUrl();
-
-function reload() {
- // Reload the page by setting the exact same href
- // Using window.location.reload() could cause a repost.
- window.location = window.location.href;
-}
-
-function msg(text, reload_page = true) {
- text && alert(text);
- reload_page && reload();
-}
-
-function _post(url, successMsg, errMsg, body, reload_page = true) {
- let respStatus;
- let respStatusText;
- fetch(url, {
- method: "POST",
- body: body,
- mode: "same-origin",
- credentials: "same-origin",
- headers: { "Content-Type": "application/json" }
- }).then(resp => {
- if (resp.ok) {
- msg(successMsg, reload_page);
- // Abuse the catch handler by setting error to false and continue
- return Promise.reject({ error: false });
- }
- respStatus = resp.status;
- respStatusText = resp.statusText;
- return resp.text();
- }).then(respText => {
- try {
- const respJson = JSON.parse(respText);
- if (respJson.ErrorModel && respJson.ErrorModel.Message) {
- return respJson.ErrorModel.Message;
- } else {
- return Promise.reject({ body: `${respStatus} - ${respStatusText}\n\nUnknown error`, error: true });
- }
- } catch (e) {
- return Promise.reject({ body: `${respStatus} - ${respStatusText}\n\n[Catch] ${e}`, error: true });
- }
- }).then(apiMsg => {
- msg(`${errMsg}\n${apiMsg}`, reload_page);
- }).catch(e => {
- if (e.error === false) { return true; }
- else { msg(`${errMsg}\n${e.body}`, reload_page); }
- });
-}
-
-// Bootstrap Theme Selector
-const getStoredTheme = () => localStorage.getItem("theme");
-const setStoredTheme = theme => localStorage.setItem("theme", theme);
-
-const getPreferredTheme = () => {
- const storedTheme = getStoredTheme();
- if (storedTheme) {
- return storedTheme;
- }
-
- return window.matchMedia("(prefers-color-scheme: dark)").matches ? "dark" : "light";
-};
-
-const setTheme = theme => {
- if (theme === "auto" && window.matchMedia("(prefers-color-scheme: dark)").matches) {
- document.documentElement.setAttribute("data-bs-theme", "dark");
- } else {
- document.documentElement.setAttribute("data-bs-theme", theme);
- }
-};
-
-setTheme(getPreferredTheme());
-
-const showActiveTheme = (theme, focus = false) => {
- const themeSwitcher = document.querySelector("#bd-theme");
-
- if (!themeSwitcher) {
- return;
- }
-
- const themeSwitcherText = document.querySelector("#bd-theme-text");
- const activeThemeIcon = document.querySelector(".theme-icon-active use");
- const btnToActive = document.querySelector(`[data-bs-theme-value="${theme}"]`);
- const svgOfActiveBtn = btnToActive.querySelector("span use").innerText;
-
- document.querySelectorAll("[data-bs-theme-value]").forEach(element => {
- element.classList.remove("active");
- element.setAttribute("aria-pressed", "false");
- });
-
- btnToActive.classList.add("active");
- btnToActive.setAttribute("aria-pressed", "true");
- activeThemeIcon.innerText = svgOfActiveBtn;
- const themeSwitcherLabel = `${themeSwitcherText.textContent} (${btnToActive.dataset.bsThemeValue})`;
- themeSwitcher.setAttribute("aria-label", themeSwitcherLabel);
-
- if (focus) {
- themeSwitcher.focus();
- }
-};
-
-window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", () => {
- const storedTheme = getStoredTheme();
- if (storedTheme !== "light" && storedTheme !== "dark") {
- setTheme(getPreferredTheme());
- }
-});
-
-
-// onLoad events
-document.addEventListener("DOMContentLoaded", (/*event*/) => {
- showActiveTheme(getPreferredTheme());
-
- document.querySelectorAll("[data-bs-theme-value]")
- .forEach(toggle => {
- toggle.addEventListener("click", () => {
- const theme = toggle.getAttribute("data-bs-theme-value");
- setStoredTheme(theme);
- setTheme(theme);
- showActiveTheme(theme, true);
- });
- });
-
- // get current URL path and assign "active" class to the correct nav-item
- const pathname = window.location.pathname;
- if (pathname === "") return;
- const navItem = document.querySelectorAll(`.navbar-nav .nav-item a[href="${pathname}"]`);
- if (navItem.length === 1) {
- navItem[0].className = navItem[0].className + " active";
- navItem[0].setAttribute("aria-current", "page");
- }
-});
-\ No newline at end of file
diff --git a/src/static/scripts/admin_diagnostics.js b/src/static/scripts/admin_diagnostics.js
@@ -1,241 +0,0 @@
-"use strict";
-/* eslint-env es2017, browser */
-/* global BASE_URL:readable, bootstrap:readable */
-
-var dnsCheck = false;
-var timeCheck = false;
-var ntpTimeCheck = false;
-var domainCheck = false;
-var httpsCheck = false;
-
-// ================================
-// Date & Time Check
-const d = new Date();
-const year = d.getUTCFullYear();
-const month = String(d.getUTCMonth()+1).padStart(2, "0");
-const day = String(d.getUTCDate()).padStart(2, "0");
-const hour = String(d.getUTCHours()).padStart(2, "0");
-const minute = String(d.getUTCMinutes()).padStart(2, "0");
-const seconds = String(d.getUTCSeconds()).padStart(2, "0");
-const browserUTC = `${year}-${month}-${day} ${hour}:${minute}:${seconds} UTC`;
-
-// ================================
-// Check if the output is a valid IP
-const isValidIp = value => (/^(?:(?:^|\.)(?:2(?:5[0-5]|[0-4]\d)|1?\d?\d)){4}$/.test(value) ? true : false);
-
-function checkVersions(platform, installed, latest, commit=null) {
- if (installed === "-" || latest === "-") {
- document.getElementById(`${platform}-failed`).classList.remove("d-none");
- return;
- }
-
- // Only check basic versions, no commit revisions
- if (commit === null || installed.indexOf("-") === -1) {
- if (installed !== latest) {
- document.getElementById(`${platform}-warning`).classList.remove("d-none");
- } else {
- document.getElementById(`${platform}-success`).classList.remove("d-none");
- }
- } else {
- // Check if this is a branched version.
- const branchRegex = /(?:\s)\((.*?)\)/;
- const branchMatch = installed.match(branchRegex);
- if (branchMatch !== null) {
- document.getElementById(`${platform}-branch`).classList.remove("d-none");
- }
-
- // This will remove branch info and check if there is a commit hash
- const installedRegex = /(\d+\.\d+\.\d+)-(\w+)/;
- const instMatch = installed.match(installedRegex);
-
- // It could be that a new tagged version has the same commit hash.
- // In that case the built code is the same and only the version number differs.
- if (instMatch !== null) {
- if (instMatch[2] === commit) {
- // The commit hashes are the same, so latest version is installed
- document.getElementById(`${platform}-success`).classList.remove("d-none");
- return;
- }
- }
-
- if (installed === latest) {
- document.getElementById(`${platform}-success`).classList.remove("d-none");
- } else {
- document.getElementById(`${platform}-warning`).classList.remove("d-none");
- }
- }
-}
-
-// ================================
-// Generate support string to be pasted on github or the forum
-async function generateSupportString(event, dj) {
- event.preventDefault();
- event.stopPropagation();
-
- let supportString = "### Your environment (Generated via diagnostics page)\n";
-
- supportString += `* Vaultwarden version: v${dj.current_release}\n`;
- supportString += `* Web-vault version: v${dj.web_vault_version}\n`;
- supportString += `* OS/Arch: ${dj.host_os}/${dj.host_arch}\n`;
- supportString += `* Running within Docker: ${dj.running_within_docker} (Base: ${dj.docker_base_image})\n`;
- supportString += "* Environment settings overridden: ";
- if (dj.overrides != "") {
- supportString += "true\n";
- } else {
- supportString += "false\n";
- }
- supportString += `* Uses a reverse proxy: ${dj.ip_header_exists}\n`;
- if (dj.ip_header_exists) {
- supportString += `* IP Header check: ${dj.ip_header_match} (${dj.ip_header_name})\n`;
- }
- supportString += `* Internet access: ${dj.has_http_access}\n`;
- supportString += `* Internet access via a proxy: ${dj.uses_proxy}\n`;
- supportString += `* DNS Check: ${dnsCheck}\n`;
- supportString += `* Browser/Server Time Check: ${timeCheck}\n`;
- supportString += `* Server/NTP Time Check: ${ntpTimeCheck}\n`;
- supportString += `* Domain Configuration Check: ${domainCheck}\n`;
- supportString += `* HTTPS Check: ${httpsCheck}\n`;
- supportString += `* Database type: ${dj.db_type}\n`;
- supportString += `* Database version: ${dj.db_version}\n`;
- supportString += "* Clients used: \n";
- supportString += "* Reverse proxy and version: \n";
- supportString += "* Other relevant information: \n";
-
- const jsonResponse = await fetch(`${BASE_URL}/admin/diagnostics/config`, {
- "headers": { "Accept": "application/json" }
- });
- if (!jsonResponse.ok) {
- alert("Generation failed: " + jsonResponse.statusText);
- throw new Error(jsonResponse);
- }
- const configJson = await jsonResponse.json();
- supportString += "\n### Config (Generated via diagnostics page)\n<details><summary>Show Running Config</summary>\n";
- supportString += `\n**Environment settings which are overridden:** ${dj.overrides}\n`;
- supportString += "\n\n```json\n" + JSON.stringify(configJson, undefined, 2) + "\n```\n</details>\n";
-
- document.getElementById("support-string").innerText = supportString;
- document.getElementById("support-string").classList.remove("d-none");
- document.getElementById("copy-support").classList.remove("d-none");
-}
-
-function copyToClipboard(event) {
- event.preventDefault();
- event.stopPropagation();
-
- const supportStr = document.getElementById("support-string").innerText;
- const tmpCopyEl = document.createElement("textarea");
-
- tmpCopyEl.setAttribute("id", "copy-support-string");
- tmpCopyEl.setAttribute("readonly", "");
- tmpCopyEl.value = supportStr;
- tmpCopyEl.style.position = "absolute";
- tmpCopyEl.style.left = "-9999px";
- document.body.appendChild(tmpCopyEl);
- tmpCopyEl.select();
- document.execCommand("copy");
- tmpCopyEl.remove();
-
- new bootstrap.Toast("#toastClipboardCopy").show();
-}
-
-function checkTimeDrift(utcTimeA, utcTimeB, statusPrefix) {
- const timeDrift = (
- Date.parse(utcTimeA.replace(" ", "T").replace(" UTC", "")) -
- Date.parse(utcTimeB.replace(" ", "T").replace(" UTC", ""))
- ) / 1000;
- if (timeDrift > 15 || timeDrift < -15) {
- document.getElementById(`${statusPrefix}-warning`).classList.remove("d-none");
- return false;
- } else {
- document.getElementById(`${statusPrefix}-success`).classList.remove("d-none");
- return true;
- }
-}
-
-function checkDomain(browserURL, serverURL) {
- if (serverURL == browserURL) {
- document.getElementById("domain-success").classList.remove("d-none");
- domainCheck = true;
- } else {
- document.getElementById("domain-warning").classList.remove("d-none");
- }
-
- // Check for HTTPS at domain-server-string
- if (serverURL.startsWith("https://") ) {
- document.getElementById("https-success").classList.remove("d-none");
- httpsCheck = true;
- } else {
- document.getElementById("https-warning").classList.remove("d-none");
- }
-}
-
-function initVersionCheck(dj) {
- const serverInstalled = dj.current_release;
- const serverLatest = dj.latest_release;
- const serverLatestCommit = dj.latest_commit;
-
- if (serverInstalled.indexOf("-") !== -1 && serverLatest !== "-" && serverLatestCommit !== "-") {
- document.getElementById("server-latest-commit").classList.remove("d-none");
- }
- checkVersions("server", serverInstalled, serverLatest, serverLatestCommit);
-
- if (!dj.running_within_docker) {
- const webInstalled = dj.web_vault_version;
- const webLatest = dj.latest_web_build;
- checkVersions("web", webInstalled, webLatest);
- }
-}
-
-function checkDns(dns_resolved) {
- if (isValidIp(dns_resolved)) {
- document.getElementById("dns-success").classList.remove("d-none");
- dnsCheck = true;
- } else {
- document.getElementById("dns-warning").classList.remove("d-none");
- }
-}
-
-function init(dj) {
- // Time check
- document.getElementById("time-browser-string").innerText = browserUTC;
-
- // Check if we were able to fetch a valid NTP Time
- // If so, compare both browser and server with NTP
- // Else, compare browser and server.
- if (dj.ntp_time.indexOf("UTC") !== -1) {
- timeCheck = checkTimeDrift(dj.server_time, browserUTC, "time");
- checkTimeDrift(dj.ntp_time, browserUTC, "ntp-browser");
- ntpTimeCheck = checkTimeDrift(dj.ntp_time, dj.server_time, "ntp-server");
- } else {
- timeCheck = checkTimeDrift(dj.server_time, browserUTC, "time");
- ntpTimeCheck = "n/a";
- }
-
- // Domain check
- const browserURL = location.href.toLowerCase();
- document.getElementById("domain-browser-string").innerText = browserURL;
- checkDomain(browserURL, dj.admin_url.toLowerCase());
-
- // Version check
- initVersionCheck(dj);
-
- // DNS Check
- checkDns(dj.dns_resolved);
-}
-
-// onLoad events
-document.addEventListener("DOMContentLoaded", (event) => {
- const diag_json = JSON.parse(document.getElementById("diagnostics_json").innerText);
- init(diag_json);
-
- const btnGenSupport = document.getElementById("gen-support");
- if (btnGenSupport) {
- btnGenSupport.addEventListener("click", () => {
- generateSupportString(event, diag_json);
- });
- }
- const btnCopySupport = document.getElementById("copy-support");
- if (btnCopySupport) {
- btnCopySupport.addEventListener("click", copyToClipboard);
- }
-});
diff --git a/src/static/scripts/admin_organizations.js b/src/static/scripts/admin_organizations.js
@@ -1,70 +0,0 @@
-"use strict";
-/* eslint-env es2017, browser, jquery */
-/* global _post:readable, BASE_URL:readable, reload:readable, jdenticon:readable */
-
-function deleteOrganization(event) {
- event.preventDefault();
- event.stopPropagation();
- const org_uuid = event.target.dataset.vwOrgUuid;
- const org_name = event.target.dataset.vwOrgName;
- const billing_email = event.target.dataset.vwBillingEmail;
- if (!org_uuid) {
- alert("Required parameters not found!");
- return false;
- }
-
- // First make sure the user wants to delete this organization
- const continueDelete = confirm(`WARNING: All data of this organization (${org_name}) will be lost!\nMake sure you have a backup, this cannot be undone!`);
- if (continueDelete == true) {
- const input_org_uuid = prompt(`To delete the organization "${org_name} (${billing_email})", please type the organization uuid below.`);
- if (input_org_uuid != null) {
- if (input_org_uuid == org_uuid) {
- _post(`${BASE_URL}/admin/organizations/${org_uuid}/delete`,
- "Organization deleted correctly",
- "Error deleting organization"
- );
- } else {
- alert("Wrong organization uuid, please try again");
- }
- }
- }
-}
-
-function initActions() {
- document.querySelectorAll("button[vw-delete-organization]").forEach(btn => {
- btn.addEventListener("click", deleteOrganization);
- });
-
- if (jdenticon) {
- jdenticon();
- }
-}
-
-// onLoad events
-document.addEventListener("DOMContentLoaded", (/*event*/) => {
- jQuery("#orgs-table").DataTable({
- "drawCallback": function() {
- initActions();
- },
- "stateSave": true,
- "responsive": true,
- "lengthMenu": [
- [-1, 5, 10, 25, 50],
- ["All", 5, 10, 25, 50]
- ],
- "pageLength": -1, // Default show all
- "columnDefs": [{
- "targets": [4,5],
- "searchable": false,
- "orderable": false
- }]
- });
-
- // Add click events for organization actions
- initActions();
-
- const btnReload = document.getElementById("reload");
- if (btnReload) {
- btnReload.addEventListener("click", reload);
- }
-});
-\ No newline at end of file
diff --git a/src/static/scripts/admin_settings.js b/src/static/scripts/admin_settings.js
@@ -1,232 +0,0 @@
-"use strict";
-/* eslint-env es2017, browser */
-/* global _post:readable, BASE_URL:readable */
-
-function smtpTest(event) {
- event.preventDefault();
- event.stopPropagation();
- if (formHasChanges(config_form)) {
- alert("Config has been changed but not yet saved.\nPlease save the changes first before sending a test email.");
- return false;
- }
-
- const test_email = document.getElementById("smtp-test-email");
-
- // Do a very very basic email address check.
- if (test_email.value.match(/\S+@\S+/i) === null) {
- test_email.parentElement.classList.add("was-validated");
- return false;
- }
-
- const data = JSON.stringify({ "email": test_email.value });
- _post(`${BASE_URL}/admin/test/smtp`,
- "SMTP Test email sent correctly",
- "Error sending SMTP test email",
- data, false
- );
-}
-
-function getFormData() {
- let data = {};
-
- document.querySelectorAll(".conf-checkbox").forEach(function (e) {
- data[e.name] = e.checked;
- });
-
- document.querySelectorAll(".conf-number").forEach(function (e) {
- data[e.name] = e.value ? +e.value : null;
- });
-
- document.querySelectorAll(".conf-text, .conf-password").forEach(function (e) {
- data[e.name] = e.value || null;
- });
- return data;
-}
-
-function saveConfig(event) {
- const data = JSON.stringify(getFormData());
- _post(`${BASE_URL}/admin/config`,
- "Config saved correctly",
- "Error saving config",
- data
- );
- event.preventDefault();
-}
-
-function deleteConf(event) {
- event.preventDefault();
- event.stopPropagation();
- const input = prompt(
- "This will remove all user configurations, and restore the defaults and the " +
- "values set by the environment. This operation could be dangerous. Type 'DELETE' to proceed:"
- );
- if (input === "DELETE") {
- _post(`${BASE_URL}/admin/config/delete`,
- "Config deleted correctly",
- "Error deleting config"
- );
- } else {
- alert("Wrong input, please try again");
- }
-}
-
-function backupDatabase(event) {
- event.preventDefault();
- event.stopPropagation();
- _post(`${BASE_URL}/admin/config/backup_db`,
- "Backup created successfully",
- "Error creating backup", null, false
- );
-}
-
-// Two functions to help check whether the form fields were changed.
-// Useful, for example, during the SMTP test to prevent people from clicking save before testing their new settings.
-function initChangeDetection(form) {
- const ignore_fields = ["smtp-test-email"];
- Array.from(form).forEach((el) => {
- if (! ignore_fields.includes(el.id)) {
- el.dataset.origValue = el.value;
- }
- });
-}
-
-function formHasChanges(form) {
- return Array.from(form).some(el => "origValue" in el.dataset && ( el.dataset.origValue !== el.value));
-}
-
-// This function will prevent submitting a form when someone presses Enter.
-function preventFormSubmitOnEnter(form) {
- if (form) {
- form.addEventListener("keypress", (event) => {
- if (event.key == "Enter") {
- event.preventDefault();
- }
- });
- }
-}
-
-// This function will hook into the smtp-test-email input field and call smtpTest() when Enter is pressed.
-function submitTestEmailOnEnter() {
- const smtp_test_email_input = document.getElementById("smtp-test-email");
- if (smtp_test_email_input) {
- smtp_test_email_input.addEventListener("keypress", (event) => {
- if (event.key == "Enter") {
- event.preventDefault();
- smtpTest(event);
- }
- });
- }
-}
-
-// Colorize some settings which are high risk
-function colorRiskSettings() {
- const risk_items = document.getElementsByClassName("col-form-label");
- Array.from(risk_items).forEach((el) => {
- if (el.innerText.toLowerCase().includes("risks") ) {
- el.parentElement.className += " alert-danger";
- }
- });
-}
-
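-// Toggle a password input between masked and plain-text display.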
-function toggleVis(event) {
- event.preventDefault();
- event.stopPropagation();
-
- const elem = document.getElementById(event.target.dataset.vwPwToggle);
- const type = elem.getAttribute("type");
- if (type === "text") {
- elem.setAttribute("type", "password");
- } else {
- elem.setAttribute("type", "text");
- }
-}
-
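-// Enable or disable a group of inputs based on the state of a master checkbox.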
-function masterCheck(check_id, inputs_query) {
- function onChanged(checkbox, inputs_query) {
- return function _fn() {
- document.querySelectorAll(inputs_query).forEach(function (e) { e.disabled = !checkbox.checked; });
- checkbox.disabled = false;
- };
- }
-
- const checkbox = document.getElementById(check_id);
- if (checkbox) {
- const onChange = onChanged(checkbox, inputs_query);
- onChange(); // Trigger the event initially
- checkbox.addEventListener("change", onChange);
- }
-}
-
-// This will check whether the ADMIN_TOKEN is an Argon2 hashed value.
-// If it is not, a warning is shown, unless someone has closed it;
-// in that case the warning is suppressed for 30 days.
-function checkAdminToken() {
- const admin_token = document.getElementById("input_admin_token");
- const disable_admin_token = document.getElementById("input_disable_admin_token");
- if (!disable_admin_token.checked && !admin_token.value.startsWith("$argon2")) {
- // Skip the warning if it was dismissed less than 30 days ago
- const admin_token_warning_closed = localStorage.getItem("admin_token_warning_closed");
- if (admin_token_warning_closed !== null) {
- const closed_date = new Date(parseInt(admin_token_warning_closed));
- const current_date = new Date();
- const thirtyDays = 1000*60*60*24*30;
- if (current_date - closed_date < thirtyDays) {
- return;
- }
- }
-
- // When closing the alert, store the current date/time in the browser
- const admin_token_warning = document.getElementById("admin_token_warning");
- admin_token_warning.addEventListener("closed.bs.alert", function() {
- const d = new Date();
- localStorage.setItem("admin_token_warning_closed", d.getTime());
- });
-
- // Display the warning
- admin_token_warning.classList.remove("d-none");
- }
-}
-
-// This will check specific configured values and show a warning div when needed.
-function showWarnings() {
- checkAdminToken();
-}
-
-const config_form = document.getElementById("config-form");
-
-// onLoad events
-document.addEventListener("DOMContentLoaded", (/*event*/) => {
- initChangeDetection(config_form);
- // Prevent Enter from submitting the form and saving the config.
- // Users need to explicitly click save; this also prevents accidental submits.
- preventFormSubmitOnEnter(config_form);
-
- submitTestEmailOnEnter();
- colorRiskSettings();
-
- document.querySelectorAll("input[id^='input__enable_']").forEach(group_toggle => {
- const input_id = group_toggle.id.replace("input__enable_", "#g_");
- masterCheck(group_toggle.id, `${input_id} input`);
- });
-
- document.querySelectorAll("button[data-vw-pw-toggle]").forEach(password_toggle_btn => {
- password_toggle_btn.addEventListener("click", toggleVis);
- });
-
- const btnBackupDatabase = document.getElementById("backupDatabase");
- if (btnBackupDatabase) {
- btnBackupDatabase.addEventListener("click", backupDatabase);
- }
- const btnDeleteConf = document.getElementById("deleteConf");
- if (btnDeleteConf) {
- btnDeleteConf.addEventListener("click", deleteConf);
- }
- const btnSmtpTest = document.getElementById("smtpTest");
- if (btnSmtpTest) {
- btnSmtpTest.addEventListener("click", smtpTest);
- }
-
- config_form.addEventListener("submit", saveConfig);
-
- showWarnings();
-});
-\ No newline at end of file
diff --git a/src/static/scripts/admin_users.js b/src/static/scripts/admin_users.js
@@ -1,304 +0,0 @@
-"use strict";
-/* eslint-env es2017, browser, jquery */
-/* global _post:readable, BASE_URL:readable, reload:readable, jdenticon:readable */
-
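-// Each per-user action below reads the user's UUID and email from the clicked button's parent dataset.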
-function deleteUser(event) {
- event.preventDefault();
- event.stopPropagation();
- const id = event.target.parentNode.dataset.vwUserUuid;
- const email = event.target.parentNode.dataset.vwUserEmail;
- if (!id || !email) {
- alert("Required parameters not found!");
- return false;
- }
- const input_email = prompt(`To delete user "${email}", please type the email below`);
- if (input_email != null) {
- if (input_email == email) {
- _post(`${BASE_URL}/admin/users/${id}/delete`,
- "User deleted correctly",
- "Error deleting user"
- );
- } else {
- alert("Wrong email, please try again");
- }
- }
-}
-
-function remove2fa(event) {
- event.preventDefault();
- event.stopPropagation();
- const id = event.target.parentNode.dataset.vwUserUuid;
- const email = event.target.parentNode.dataset.vwUserEmail;
- if (!id || !email) {
- alert("Required parameters not found!");
- return false;
- }
- const confirmed = confirm(`Are you sure you want to remove 2FA for "${email}"?`);
- if (confirmed) {
- _post(`${BASE_URL}/admin/users/${id}/remove-2fa`,
- "2FA removed correctly",
- "Error removing 2FA"
- );
- }
-}
-
-function deauthUser(event) {
- event.preventDefault();
- event.stopPropagation();
- const id = event.target.parentNode.dataset.vwUserUuid;
- const email = event.target.parentNode.dataset.vwUserEmail;
- if (!id || !email) {
- alert("Required parameters not found!");
- return false;
- }
- const confirmed = confirm(`Are you sure you want to deauthorize sessions for "${email}"?`);
- if (confirmed) {
- _post(`${BASE_URL}/admin/users/${id}/deauth`,
- "Sessions deauthorized correctly",
- "Error deauthorizing sessions"
- );
- }
-}
-
-function disableUser(event) {
- event.preventDefault();
- event.stopPropagation();
- const id = event.target.parentNode.dataset.vwUserUuid;
- const email = event.target.parentNode.dataset.vwUserEmail;
- if (!id || !email) {
- alert("Required parameters not found!");
- return false;
- }
- const confirmed = confirm(`Are you sure you want to disable user "${email}"? This will also deauthorize their sessions.`);
- if (confirmed) {
- _post(`${BASE_URL}/admin/users/${id}/disable`,
- "User disabled successfully",
- "Error disabling user"
- );
- }
-}
-
-function enableUser(event) {
- event.preventDefault();
- event.stopPropagation();
- const id = event.target.parentNode.dataset.vwUserUuid;
- const email = event.target.parentNode.dataset.vwUserEmail;
- if (!id || !email) {
- alert("Required parameters not found!");
- return false;
- }
- const confirmed = confirm(`Are you sure you want to enable user "${email}"?`);
- if (confirmed) {
- _post(`${BASE_URL}/admin/users/${id}/enable`,
- "User enabled successfully",
- "Error enabling user"
- );
- }
-}
-
-function updateRevisions(event) {
- event.preventDefault();
- event.stopPropagation();
- _post(`${BASE_URL}/admin/users/update_revision`,
- "Success, clients will sync next time they connect",
- "Error forcing clients to sync"
- );
-}
-
-function inviteUser(event) {
- event.preventDefault();
- event.stopPropagation();
- const email = document.getElementById("inviteEmail");
- const data = JSON.stringify({
- "email": email.value
- });
- email.value = "";
- _post(`${BASE_URL}/admin/invite`,
- "User invited correctly",
- "Error inviting user",
- data
- );
-}
-
-function resendUserInvite(event) {
- event.preventDefault();
- event.stopPropagation();
- const id = event.target.parentNode.dataset.vwUserUuid;
- const email = event.target.parentNode.dataset.vwUserEmail;
- if (!id || !email) {
- alert("Required parameters not found!");
- return false;
- }
- const confirmed = confirm(`Are you sure you want to resend invitation for "${email}"?`);
- if (confirmed) {
- _post(`${BASE_URL}/admin/users/${id}/invite/resend`,
- "Invite sent successfully",
- "Error resend invite"
- );
- }
-}
-
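-// Badge label and colors for each organization membership type.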
-const ORG_TYPES = {
- "0": {
- "name": "Owner",
- "bg": "orange",
- "font": "black"
- },
- "1": {
- "name": "Admin",
- "bg": "blueviolet"
- },
- "2": {
- "name": "User",
- "bg": "blue"
- },
- "3": {
- "name": "Manager",
- "bg": "green"
- },
-};
-
-// Special sort function to sort dates in ISO format
-jQuery.extend(jQuery.fn.dataTableExt.oSort, {
- "date-iso-pre": function(a) {
- let x;
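- // Strip any HTML tags, then collapse "YYYY-MM-DD HH:MM:SS" into a single numeric sort key; blank cells sort last.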
- const sortDate = a.replace(/(<([^>]+)>)/gi, "").trim();
- if (sortDate !== "") {
- const dtParts = sortDate.split(" ");
- const timeParts = (undefined != dtParts[1]) ? dtParts[1].split(":") : ["00", "00", "00"];
- const dateParts = dtParts[0].split("-");
- x = (dateParts[0] + dateParts[1] + dateParts[2] + timeParts[0] + timeParts[1] + ((undefined != timeParts[2]) ? timeParts[2] : 0)) * 1;
- if (isNaN(x)) {
- x = 0;
- }
- } else {
- x = Infinity;
- }
- return x;
- },
-
- "date-iso-asc": function(a, b) {
- return a - b;
- },
-
- "date-iso-desc": function(a, b) {
- return b - a;
- }
-});
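-// Columns opt in to this comparator via "type": "date-iso" in their columnDefs (see the users table setup below).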
-
-const userOrgTypeDialog = document.getElementById("userOrgTypeDialog");
-// Fill the form and title
-userOrgTypeDialog.addEventListener("show.bs.modal", function(event) {
- // Get shared values
- const userEmail = event.relatedTarget.parentNode.dataset.vwUserEmail;
- const userUuid = event.relatedTarget.parentNode.dataset.vwUserUuid;
- // Get org specific values
- const userOrgType = event.relatedTarget.dataset.vwOrgType;
- const userOrgTypeName = ORG_TYPES[userOrgType]["name"];
- const orgName = event.relatedTarget.dataset.vwOrgName;
- const orgUuid = event.relatedTarget.dataset.vwOrgUuid;
-
- document.getElementById("userOrgTypeDialogTitle").innerHTML = `<b>Update User Type:</b><br><b>Organization:</b> ${orgName}<br><b>User:</b> ${userEmail}`;
- document.getElementById("userOrgTypeUserUuid").value = userUuid;
- document.getElementById("userOrgTypeOrgUuid").value = orgUuid;
- document.getElementById(`userOrgType${userOrgTypeName}`).checked = true;
-}, false);
-
-// Clear the form after the modal has been hidden, so stale but valid values cannot be submitted accidentally.
-userOrgTypeDialog.addEventListener("hide.bs.modal", function() {
- document.getElementById("userOrgTypeDialogTitle").innerHTML = "";
- document.getElementById("userOrgTypeUserUuid").value = "";
- document.getElementById("userOrgTypeOrgUuid").value = "";
-}, false);
-
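-// Serialize the user-org-type form to JSON and POST it to the admin API.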
-function updateUserOrgType(event) {
- event.preventDefault();
- event.stopPropagation();
-
- const data = JSON.stringify(Object.fromEntries(new FormData(event.target).entries()));
-
- _post(`${BASE_URL}/admin/users/org_type`,
- "Updated organization type of the user successfully",
- "Error updating organization type of the user",
- data
- );
-}
-
-function initUserTable() {
- // Color all the org buttons per type
- document.querySelectorAll("button[data-vw-org-type]").forEach(function(e) {
- const orgType = ORG_TYPES[e.dataset.vwOrgType];
- e.style.backgroundColor = orgType.bg;
- if (orgType.font !== undefined) {
- e.style.color = orgType.font;
- }
- e.title = orgType.name;
- });
-
- document.querySelectorAll("button[vw-remove2fa]").forEach(btn => {
- btn.addEventListener("click", remove2fa);
- });
- document.querySelectorAll("button[vw-deauth-user]").forEach(btn => {
- btn.addEventListener("click", deauthUser);
- });
- document.querySelectorAll("button[vw-delete-user]").forEach(btn => {
- btn.addEventListener("click", deleteUser);
- });
- document.querySelectorAll("button[vw-disable-user]").forEach(btn => {
- btn.addEventListener("click", disableUser);
- });
- document.querySelectorAll("button[vw-enable-user]").forEach(btn => {
- btn.addEventListener("click", enableUser);
- });
- document.querySelectorAll("button[vw-resend-user-invite]").forEach(btn => {
- btn.addEventListener("click", resendUserInvite);
- });
-
- if (jdenticon) {
- jdenticon();
- }
-}
-
-// onLoad events
-document.addEventListener("DOMContentLoaded", (/*event*/) => {
- jQuery("#users-table").DataTable({
- "drawCallback": function() {
- initUserTable();
- },
- "stateSave": true,
- "responsive": true,
- "lengthMenu": [
- [-1, 2, 5, 10, 25, 50],
- ["All", 2, 5, 10, 25, 50]
- ],
- "pageLength": -1, // Default show all
- "columnDefs": [{
- "targets": [1, 2],
- "type": "date-iso"
- }, {
- "targets": 6,
- "searchable": false,
- "orderable": false
- }]
- });
-
- // Add click events for user actions
- initUserTable();
-
- const btnUpdateRevisions = document.getElementById("updateRevisions");
- if (btnUpdateRevisions) {
- btnUpdateRevisions.addEventListener("click", updateRevisions);
- }
- const btnReload = document.getElementById("reload");
- if (btnReload) {
- btnReload.addEventListener("click", reload);
- }
- const btnUserOrgTypeForm = document.getElementById("userOrgTypeForm");
- if (btnUserOrgTypeForm) {
- btnUserOrgTypeForm.addEventListener("submit", updateUserOrgType);
- }
- const btnInviteUserForm = document.getElementById("inviteUserForm");
- if (btnInviteUserForm) {
- btnInviteUserForm.addEventListener("submit", inviteUser);
- }
-});
-\ No newline at end of file
diff --git a/src/util.rs b/src/util.rs
@@ -12,7 +12,7 @@ use tokio::{
time::{sleep, Duration},
};
-pub struct AppHeaders();
+pub struct AppHeaders;
#[rocket::async_trait]
impl Fairing for AppHeaders {
@@ -22,7 +22,7 @@ impl Fairing for AppHeaders {
kind: Kind::Response,
}
}
-
+ #[allow(clippy::similar_names)]
async fn on_response<'r>(&self, req: &'r Request<'_>, res: &mut Response<'r>) {
let req_uri_path = req.uri().path();
let req_headers = req.headers();
@@ -59,7 +59,10 @@ impl Fairing for AppHeaders {
// Do not send the Content-Security-Policy (CSP) Header and X-Frame-Options for the *-connector.html files.
// Sending them can cause issues when some MFA requests, such as WebAuthn, need to open a popup or page within the clients.
// This is the same behavior as upstream Bitwarden.
- if !req_uri_path.ends_with("connector.html") {
+ if req_uri_path.ends_with("connector.html") {
+ // This header also gets set elsewhere; make sure it is not sent for these files, as it would cause MFA issues.
+ res.remove_header("X-Frame-Options");
+ } else {
// # Frame Ancestors:
// Chrome Web Store: https://chrome.google.com/webstore/detail/bitwarden-free-password-m/nngceckbapebfimnlniiiahkandclblb
// Edge Add-ons: https://microsoftedge.microsoft.com/addons/detail/bitwarden-free-password/jbkfoedolllekgbhcbcoahefnbanhhlh?hl=en-US
@@ -102,9 +105,6 @@ impl Fairing for AppHeaders {
);
res.set_raw_header("Content-Security-Policy", csp);
res.set_raw_header("X-Frame-Options", "SAMEORIGIN");
- } else {
- // This header also gets set elsewhere; make sure it is not sent for these files, as it would cause MFA issues.
- res.remove_header("X-Frame-Options");
}
// Disable cache unless otherwise specified
@@ -114,20 +114,19 @@ impl Fairing for AppHeaders {
}
}
-pub struct Cors();
+pub struct Cors;
impl Cors {
fn get_header(headers: &HeaderMap<'_>, name: &str) -> String {
- match headers.get_one(name) {
- Some(h) => h.to_string(),
- _ => String::new(),
- }
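+ // Return the named header's value, or an empty string when it is absent.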
+ headers
+ .get_one(name)
+ .map_or_else(String::new, std::string::ToString::to_string)
}
// Check a request's `Origin` header against the list of allowed origins.
// If a match exists, return it. Otherwise, return None.
fn get_allowed_origin(headers: &HeaderMap<'_>) -> Option<String> {
- let origin = Cors::get_header(headers, "Origin");
+ let origin = Self::get_header(headers, "Origin");
let domain_origin = config::get_config().domain_origin();
let safari_extension_origin = "file://";
if origin == domain_origin || origin == safari_extension_origin {
@@ -150,14 +149,14 @@ impl Fairing for Cors {
async fn on_response<'r>(&self, request: &'r Request<'_>, response: &mut Response<'r>) {
let req_headers = request.headers();
- if let Some(origin) = Cors::get_allowed_origin(req_headers) {
+ if let Some(origin) = Self::get_allowed_origin(req_headers) {
response.set_header(Header::new("Access-Control-Allow-Origin", origin));
}
// Preflight request
if request.method() == Method::Options {
- let req_allow_headers = Cors::get_header(req_headers, "Access-Control-Request-Headers");
- let req_allow_method = Cors::get_header(req_headers, "Access-Control-Request-Method");
+ let req_allow_headers = Self::get_header(req_headers, "Access-Control-Request-Headers");
+ let req_allow_method = Self::get_header(req_headers, "Access-Control-Request-Method");
response.set_header(Header::new(
"Access-Control-Allow-Methods",
@@ -182,15 +181,15 @@ pub struct Cached<R> {
}
impl<R> Cached<R> {
- pub fn long(response: R, is_immutable: bool) -> Cached<R> {
+ pub const fn long(response: R, is_immutable: bool) -> Self {
Self {
response,
is_immutable,
- ttl: 604800, // 7 days
+ ttl: 604_800, // 7 days
}
}
- pub fn short(response: R, is_immutable: bool) -> Cached<R> {
+ pub const fn short(response: R, is_immutable: bool) -> Self {
Self {
response,
is_immutable,
@@ -198,7 +197,7 @@ impl<R> Cached<R> {
}
}
- pub fn ttl(response: R, ttl: u64, is_immutable: bool) -> Cached<R> {
+ pub const fn ttl(response: R, ttl: u64, is_immutable: bool) -> Self {
Self {
response,
is_immutable,
@@ -249,13 +248,13 @@ impl AsRef<Path> for SafeString {
impl<'r> FromParam<'r> for SafeString {
type Error = ();
- #[inline(always)]
+ #[inline]
fn from_param(param: &'r str) -> Result<Self, Self::Error> {
if param
.chars()
.all(|c| matches!(c, 'a'..='z' | 'A'..='Z' |'0'..='9' | '-'))
{
- Ok(SafeString(param.to_string()))
+ Ok(Self(param.to_string()))
} else {
Err(())
}
@@ -263,24 +262,6 @@ impl<'r> FromParam<'r> for SafeString {
}
use std::path::Path;
-pub fn get_display_size(size: i32) -> String {
- const UNITS: [&str; 6] = ["bytes", "KB", "MB", "GB", "TB", "PB"];
-
- let mut size: f64 = size.into();
- let mut unit_counter = 0;
-
- loop {
- if size > 1024. {
- size /= 1024.;
- unit_counter += 1;
- } else {
- break;
- }
- }
-
- format!("{:.2} {}", size, UNITS[unit_counter])
-}
-
pub fn get_uuid() -> String {
uuid::Uuid::new_v4().to_string()
}
@@ -292,21 +273,19 @@ pub fn get_uuid() -> String {
use std::str::FromStr;
#[inline]
-pub fn upcase_first(s: &str) -> String {
+fn upcase_first(s: &str) -> String {
let mut c = s.chars();
- match c.next() {
- None => String::new(),
- Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
- }
+ c.next().map_or_else(String::new, |f| {
+ f.to_uppercase().collect::<String>() + c.as_str()
+ })
}
#[inline]
-pub fn lcase_first(s: &str) -> String {
+fn lcase_first(s: &str) -> String {
let mut c = s.chars();
- match c.next() {
- None => String::new(),
- Some(f) => f.to_lowercase().collect::<String>() + c.as_str(),
- }
+ c.next().map_or_else(String::new, |f| {
+ f.to_lowercase().collect::<String>() + c.as_str()
+ })
}
pub fn try_parse_string<S, T>(string: Option<S>) -> Option<T>
@@ -333,8 +312,8 @@ pub fn format_date(dt: &NaiveDateTime) -> String {
/// Formats a `DateTime<Local>` as required for HTTP
///
-/// https://httpwg.org/specs/rfc7231.html#http.date
-pub fn format_datetime_http(dt: &DateTime<Local>) -> String {
+/// [RFC 7231](https://httpwg.org/specs/rfc7231.html#http.date)
+fn format_datetime_http(dt: &DateTime<Local>) -> String {
let expiry_time =
DateTime::<chrono::Utc>::from_naive_utc_and_offset(dt.naive_utc(), chrono::Utc);
@@ -351,7 +330,7 @@ use std::fmt;
use serde::de::{self, DeserializeOwned, Deserializer, MapAccess, SeqAccess, Visitor};
use serde_json::{self, Value};
-pub type JsonMap = serde_json::Map<String, Value>;
+type JsonMap = serde_json::Map<String, Value>;
#[derive(Serialize, Deserialize)]
pub struct UpCase<T: DeserializeOwned> {
@@ -361,7 +340,7 @@ pub struct UpCase<T: DeserializeOwned> {
}
// https://github.com/serde-rs/serde/issues/586
-pub fn upcase_deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
+fn upcase_deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where
T: DeserializeOwned,
D: Deserializer<'de>,
@@ -410,7 +389,7 @@ fn upcase_value(value: Value) -> Value {
if let Value::Object(map) = value {
let mut new_value = Value::Object(serde_json::Map::new());
- for (key, val) in map.into_iter() {
+ for (key, val) in map {
let processed_key = _process_key(&key);
new_value[processed_key] = upcase_value(val);
}
@@ -462,10 +441,10 @@ where
}
}
-pub async fn retry_db<F, T, E>(mut func: F, max_tries: u32) -> Result<T, E>
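+// The `Send` bounds ensure the future returned by this async fn is `Send`,
+// so callers can await it from a multithreaded runtime.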
+pub async fn retry_db<F, T: Send, E>(mut func: F, max_tries: u32) -> Result<T, E>
where
- F: FnMut() -> Result<T, E>,
- E: std::error::Error,
+ F: FnMut() -> Result<T, E> + Send,
+ E: std::error::Error + Send,
{
let mut tries = 0;
@@ -499,7 +478,7 @@ pub fn convert_json_key_lcase_first(src_json: Value) -> Value {
Value::Object(obj) => {
let mut json_map = JsonMap::new();
- for (key, value) in obj.iter() {
+ for (key, value) in &obj {
match (key, value) {
(key, Value::Object(elm)) => {
let inner_value = convert_json_key_lcase_first(Value::Object(elm.clone()));