vw_small

Hardened fork of Vaultwarden (https://github.com/dani-garcia/vaultwarden) with fewer features.
git clone https://git.philomathiclife.com/repos/vw_small

commit 674d9a999e6afca21b6b0be7d7678a32444b9f30
parent 03c6ed2e07a187bc70c8aa348546f25594cd38bd
Author: Zack Newman <zack@philomathiclife.com>
Date:   Sun, 12 Nov 2023 19:42:18 -0700

remove code. add pledge and unveil
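The "pledge and unveil" half of this commit lands in the new src/priv_sep.rs (85 lines in the diffstat below). On OpenBSD, pledge(2) restricts a process to declared groups of system calls and unveil(2) shrinks its view of the filesystem to declared paths. What follows is a minimal sketch of that pattern, not the actual contents of src/priv_sep.rs: it assumes an OpenBSD target and the libc crate, and the promise string and unveiled path are illustrative only.

    use std::ffi::CString;
    use std::io::{Error, Result};
    use std::ptr;

    /// Restrict this process to the given pledge(2) promise groups.
    /// execpromises is NULL: nothing is exec'd after this point.
    fn pledge_promises(promises: &str) -> Result<()> {
        let p = CString::new(promises).expect("promises contain no NUL");
        // libc exposes pledge(2) on OpenBSD targets.
        if unsafe { libc::pledge(p.as_ptr(), ptr::null()) } == -1 {
            return Err(Error::last_os_error());
        }
        Ok(())
    }

    /// Make `path` visible with `permissions` (e.g. "r", "rwc");
    /// everything not unveiled disappears once the view is locked.
    fn unveil_path(path: &str, permissions: &str) -> Result<()> {
        let p = CString::new(path).expect("path contains no NUL");
        let perms = CString::new(permissions).expect("permissions contain no NUL");
        if unsafe { libc::unveil(p.as_ptr(), perms.as_ptr()) } == -1 {
            return Err(Error::last_os_error());
        }
        Ok(())
    }

    fn main() -> Result<()> {
        // Hypothetical policy: only the data directory is reachable.
        unveil_path("data", "rwc")?;
        // unveil(NULL, NULL) locks the filesystem view against further changes.
        if unsafe { libc::unveil(ptr::null(), ptr::null()) } == -1 {
            return Err(Error::last_os_error());
        }
        // Keep only the syscall groups a networked service needs.
        pledge_promises("stdio rpath wpath cpath flock inet dns")?;
        Ok(())
    }

After the final pledge, any system call outside the promised groups kills the process with SIGABRT, so a compromised server fails closed instead of gaining new capabilities.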

Diffstat:
D .dockerignore | 40 -
D .editorconfig | 23 -
D .env.template | 454 -
D .gitattributes | 3 -
D .github/FUNDING.yml | 3 -
D .github/ISSUE_TEMPLATE/bug_report.md | 66 -
D .github/ISSUE_TEMPLATE/config.yml | 8 -
D .github/security-contact.gif | 0
D .github/workflows/build.yml | 197 -
D .github/workflows/hadolint.yml | 33 -
D .github/workflows/release.yml | 170 -
D .github/workflows/trivy.yml | 43 -
M .gitignore | 18 +-
D .hadolint.yaml | 13 -
D .pre-commit-config.yaml | 44 -
M Cargo.lock | 1435 +-
M Cargo.toml | 222 +-
D Dockerfile | 2 -
D README.md | 95 -
D SECURITY.md | 45 -
M build.rs | 77 -
M diesel.toml | 3 +-
D docker/DockerSettings.yaml | 28 -
D docker/Dockerfile.alpine | 160 -
D docker/Dockerfile.debian | 194 -
D docker/Dockerfile.j2 | 237 -
D docker/Makefile | 4 -
D docker/README.md | 183 -
D docker/bake.sh | 15 -
D docker/bake_env.sh | 33 -
D docker/docker-bake.hcl | 235 -
D docker/healthcheck.sh | 57 -
D docker/podman-bake.sh | 105 -
D docker/render_template | 31 -
D docker/start.sh | 25 -
D migrations/mysql/2018-01-14-171611_create_tables/down.sql | 10 -
D migrations/mysql/2018-01-14-171611_create_tables/up.sql | 62 -
D migrations/mysql/2018-02-17-205753_create_collections_and_orgs/down.sql | 8 -
D migrations/mysql/2018-02-17-205753_create_collections_and_orgs/up.sql | 30 -
D migrations/mysql/2018-04-27-155151_create_users_ciphers/down.sql | 0
D migrations/mysql/2018-04-27-155151_create_users_ciphers/up.sql | 34 -
D migrations/mysql/2018-05-08-161616_create_collection_cipher_map/down.sql | 2 -
D migrations/mysql/2018-05-08-161616_create_collection_cipher_map/up.sql | 5 -
D migrations/mysql/2018-05-25-232323_update_attachments_reference/down.sql | 0
D migrations/mysql/2018-05-25-232323_update_attachments_reference/up.sql | 14 -
D migrations/mysql/2018-06-01-112529_update_devices_twofactor_remember/down.sql | 2 -
D migrations/mysql/2018-06-01-112529_update_devices_twofactor_remember/up.sql | 4 -
D migrations/mysql/2018-07-11-181453_create_u2f_twofactor/down.sql | 9 -
D migrations/mysql/2018-07-11-181453_create_u2f_twofactor/up.sql | 15 -
D migrations/mysql/2018-08-27-172114_update_ciphers/down.sql | 0
D migrations/mysql/2018-08-27-172114_update_ciphers/up.sql | 4 -
D migrations/mysql/2018-09-10-111213_add_invites/down.sql | 2 -
D migrations/mysql/2018-09-10-111213_add_invites/up.sql | 3 -
D migrations/mysql/2018-09-19-144557_add_kdf_columns/down.sql | 0
D migrations/mysql/2018-09-19-144557_add_kdf_columns/up.sql | 7 -
D migrations/mysql/2018-11-27-152651_add_att_key_columns/down.sql | 0
D migrations/mysql/2018-11-27-152651_add_att_key_columns/up.sql | 3 -
D migrations/mysql/2019-05-26-216651_rename_key_and_type_columns/down.sql | 8 -
D migrations/mysql/2019-05-26-216651_rename_key_and_type_columns/up.sql | 8 -
D migrations/mysql/2019-10-10-083032_add_column_to_twofactor/down.sql | 0
D migrations/mysql/2019-10-10-083032_add_column_to_twofactor/up.sql | 2 -
D migrations/mysql/2019-11-17-011009_add_email_verification/down.sql | 1 -
D migrations/mysql/2019-11-17-011009_add_email_verification/up.sql | 5 -
D migrations/mysql/2020-03-13-205045_add_policy_table/down.sql | 1 -
D migrations/mysql/2020-03-13-205045_add_policy_table/up.sql | 9 -
D migrations/mysql/2020-04-09-235005_add_cipher_delete_date/down.sql | 1 -
D migrations/mysql/2020-04-09-235005_add_cipher_delete_date/up.sql | 3 -
D migrations/mysql/2020-07-01-214531_add_hide_passwords/down.sql | 0
D migrations/mysql/2020-07-01-214531_add_hide_passwords/up.sql | 2 -
D migrations/mysql/2020-08-02-025025_add_favorites_table/down.sql | 13 -
D migrations/mysql/2020-08-02-025025_add_favorites_table/up.sql | 16 -
D migrations/mysql/2020-11-30-224000_add_user_enabled/down.sql | 0
D migrations/mysql/2020-11-30-224000_add_user_enabled/up.sql | 1 -
D migrations/mysql/2020-12-09-173101_add_stamp_exception/down.sql | 0
D migrations/mysql/2020-12-09-173101_add_stamp_exception/up.sql | 2 -
D migrations/mysql/2021-03-11-190243_add_sends/down.sql | 1 -
D migrations/mysql/2021-03-11-190243_add_sends/up.sql | 26 -
D migrations/mysql/2021-04-30-233251_add_reprompt/down.sql | 0
D migrations/mysql/2021-04-30-233251_add_reprompt/up.sql | 2 -
D migrations/mysql/2021-05-11-205202_add_hide_email/down.sql | 0
D migrations/mysql/2021-05-11-205202_add_hide_email/up.sql | 2 -
D migrations/mysql/2021-07-01-203140_add_password_reset_keys/down.sql | 0
D migrations/mysql/2021-07-01-203140_add_password_reset_keys/up.sql | 5 -
D migrations/mysql/2021-08-30-193501_create_emergency_access/down.sql | 1 -
D migrations/mysql/2021-08-30-193501_create_emergency_access/up.sql | 14 -
D migrations/mysql/2021-10-24-164321_add_2fa_incomplete/down.sql | 1 -
D migrations/mysql/2021-10-24-164321_add_2fa_incomplete/up.sql | 9 -
D migrations/mysql/2022-01-17-234911_add_api_key/down.sql | 0
D migrations/mysql/2022-01-17-234911_add_api_key/up.sql | 2 -
D migrations/mysql/2022-03-02-210038_update_devices_primary_key/down.sql | 0
D migrations/mysql/2022-03-02-210038_update_devices_primary_key/up.sql | 4 -
D migrations/mysql/2022-07-27-110000_add_group_support/down.sql | 4 -
D migrations/mysql/2022-07-27-110000_add_group_support/up.sql | 24 -
D migrations/mysql/2022-10-18-170602_add_events/down.sql | 1 -
D migrations/mysql/2022-10-18-170602_add_events/up.sql | 19 -
D migrations/mysql/2023-01-06-151600_add_reset_password_support/down.sql | 0
D migrations/mysql/2023-01-06-151600_add_reset_password_support/up.sql | 2 -
D migrations/mysql/2023-01-11-205851_add_avatar_color/down.sql | 0
D migrations/mysql/2023-01-11-205851_add_avatar_color/up.sql | 2 -
D migrations/mysql/2023-01-31-222222_add_argon2/down.sql | 0
D migrations/mysql/2023-01-31-222222_add_argon2/up.sql | 7 -
D migrations/mysql/2023-02-18-125735_push_uuid_table/down.sql | 0
D migrations/mysql/2023-02-18-125735_push_uuid_table/up.sql | 2 -
D migrations/mysql/2023-06-02-200424_create_organization_api_key/down.sql | 0
D migrations/mysql/2023-06-02-200424_create_organization_api_key/up.sql | 10 -
D migrations/mysql/2023-06-17-200424_create_auth_requests_table/down.sql | 0
D migrations/mysql/2023-06-17-200424_create_auth_requests_table/up.sql | 20 -
D migrations/mysql/2023-06-28-133700_add_collection_external_id/down.sql | 0
D migrations/mysql/2023-06-28-133700_add_collection_external_id/up.sql | 1 -
D migrations/mysql/2023-09-01-170620_update_auth_request_table/down.sql | 0
D migrations/mysql/2023-09-01-170620_update_auth_request_table/up.sql | 5 -
D migrations/mysql/2023-09-02-212336_move_user_external_id/down.sql | 0
D migrations/mysql/2023-09-02-212336_move_user_external_id/up.sql | 2 -
D migrations/mysql/2023-10-21-221242_add_cipher_key/down.sql | 0
D migrations/mysql/2023-10-21-221242_add_cipher_key/up.sql | 2 -
D migrations/postgresql/2019-09-12-100000_create_tables/down.sql | 13 -
D migrations/postgresql/2019-09-12-100000_create_tables/up.sql | 122 -
D migrations/postgresql/2019-09-16-150000_fix_attachments/down.sql | 27 -
D migrations/postgresql/2019-09-16-150000_fix_attachments/up.sql | 28 -
D migrations/postgresql/2019-10-10-083032_add_column_to_twofactor/down.sql | 0
D migrations/postgresql/2019-10-10-083032_add_column_to_twofactor/up.sql | 2 -
D migrations/postgresql/2019-11-17-011009_add_email_verification/down.sql | 1 -
D migrations/postgresql/2019-11-17-011009_add_email_verification/up.sql | 5 -
D migrations/postgresql/2020-03-13-205045_add_policy_table/down.sql | 1 -
D migrations/postgresql/2020-03-13-205045_add_policy_table/up.sql | 9 -
D migrations/postgresql/2020-04-09-235005_add_cipher_delete_date/down.sql | 1 -
D migrations/postgresql/2020-04-09-235005_add_cipher_delete_date/up.sql | 3 -
D migrations/postgresql/2020-07-01-214531_add_hide_passwords/down.sql | 0
D migrations/postgresql/2020-07-01-214531_add_hide_passwords/up.sql | 2 -
D migrations/postgresql/2020-08-02-025025_add_favorites_table/down.sql | 13 -
D migrations/postgresql/2020-08-02-025025_add_favorites_table/up.sql | 16 -
D migrations/postgresql/2020-11-30-224000_add_user_enabled/down.sql | 0
D migrations/postgresql/2020-11-30-224000_add_user_enabled/up.sql | 1 -
D migrations/postgresql/2020-12-09-173101_add_stamp_exception/down.sql | 0
D migrations/postgresql/2020-12-09-173101_add_stamp_exception/up.sql | 2 -
D migrations/postgresql/2021-03-11-190243_add_sends/down.sql | 1 -
D migrations/postgresql/2021-03-11-190243_add_sends/up.sql | 26 -
D migrations/postgresql/2021-03-15-163412_rename_send_key/down.sql | 0
D migrations/postgresql/2021-03-15-163412_rename_send_key/up.sql | 1 -
D migrations/postgresql/2021-04-30-233251_add_reprompt/down.sql | 0
D migrations/postgresql/2021-04-30-233251_add_reprompt/up.sql | 2 -
D migrations/postgresql/2021-05-11-205202_add_hide_email/down.sql | 0
D migrations/postgresql/2021-05-11-205202_add_hide_email/up.sql | 2 -
D migrations/postgresql/2021-07-01-203140_add_password_reset_keys/down.sql | 0
D migrations/postgresql/2021-07-01-203140_add_password_reset_keys/up.sql | 5 -
D migrations/postgresql/2021-08-30-193501_create_emergency_access/down.sql | 1 -
D migrations/postgresql/2021-08-30-193501_create_emergency_access/up.sql | 14 -
D migrations/postgresql/2021-10-24-164321_add_2fa_incomplete/down.sql | 1 -
D migrations/postgresql/2021-10-24-164321_add_2fa_incomplete/up.sql | 9 -
D migrations/postgresql/2022-01-17-234911_add_api_key/down.sql | 0
D migrations/postgresql/2022-01-17-234911_add_api_key/up.sql | 2 -
D migrations/postgresql/2022-03-02-210038_update_devices_primary_key/down.sql | 0
D migrations/postgresql/2022-03-02-210038_update_devices_primary_key/up.sql | 4 -
D migrations/postgresql/2022-07-27-110000_add_group_support/down.sql | 4 -
D migrations/postgresql/2022-07-27-110000_add_group_support/up.sql | 24 -
D migrations/postgresql/2022-10-18-170602_add_events/down.sql | 1 -
D migrations/postgresql/2022-10-18-170602_add_events/up.sql | 19 -
D migrations/postgresql/2023-01-06-151600_add_reset_password_support/down.sql | 0
D migrations/postgresql/2023-01-06-151600_add_reset_password_support/up.sql | 2 -
D migrations/postgresql/2023-01-11-205851_add_avatar_color/down.sql | 0
D migrations/postgresql/2023-01-11-205851_add_avatar_color/up.sql | 2 -
D migrations/postgresql/2023-01-31-222222_add_argon2/down.sql | 0
D migrations/postgresql/2023-01-31-222222_add_argon2/up.sql | 7 -
D migrations/postgresql/2023-02-18-125735_push_uuid_table/down.sql | 0
D migrations/postgresql/2023-02-18-125735_push_uuid_table/up.sql | 2 -
D migrations/postgresql/2023-06-02-200424_create_organization_api_key/down.sql | 0
D migrations/postgresql/2023-06-02-200424_create_organization_api_key/up.sql | 10 -
D migrations/postgresql/2023-06-17-200424_create_auth_requests_table/down.sql | 0
D migrations/postgresql/2023-06-17-200424_create_auth_requests_table/up.sql | 20 -
D migrations/postgresql/2023-06-28-133700_add_collection_external_id/down.sql | 0
D migrations/postgresql/2023-06-28-133700_add_collection_external_id/up.sql | 1 -
D migrations/postgresql/2023-09-01-170620_update_auth_request_table/down.sql | 0
D migrations/postgresql/2023-09-01-170620_update_auth_request_table/up.sql | 5 -
D migrations/postgresql/2023-09-02-212336_move_user_external_id/down.sql | 0
D migrations/postgresql/2023-09-02-212336_move_user_external_id/up.sql | 2 -
D migrations/postgresql/2023-10-21-221242_add_cipher_key/down.sql | 0
D migrations/postgresql/2023-10-21-221242_add_cipher_key/up.sql | 2 -
D rust-toolchain.toml | 4 -
D rustfmt.toml | 4 -
M src/api/admin.rs | 803 +-
M src/api/core/accounts.rs | 296 +-
M src/api/core/ciphers.rs | 677 +-
M src/api/core/emergency_access.rs | 268 +-
M src/api/core/events.rs | 283 +-
M src/api/core/mod.rs | 56 +-
M src/api/core/organizations.rs | 1250 +-
M src/api/core/public.rs | 72 +-
M src/api/core/sends.rs | 100 +-
M src/api/core/two_factor/authenticator.rs | 70 +-
D src/api/core/two_factor/duo.rs | 373 -
D src/api/core/two_factor/email.rs | 337 -
M src/api/core/two_factor/mod.rs | 109 +-
M src/api/core/two_factor/webauthn.rs | 196 +-
D src/api/core/two_factor/yubikey.rs | 201 -
M src/api/icons.rs | 995 +-
M src/api/identity.rs | 227 +-
M src/api/mod.rs | 21 +-
M src/api/notifications.rs | 371 +-
D src/api/push.rs | 294 -
M src/api/web.rs | 158 +-
M src/auth.rs | 130 +-
M src/config.rs | 522 +-
M src/crypto.rs | 17 +-
M src/db/mod.rs | 149 +-
M src/db/models/cipher.rs | 117 +-
M src/db/models/org_policy.rs | 57 +-
M src/db/models/organization.rs | 145 +-
M src/db/models/two_factor.rs | 86 +-
D src/db/schemas/mysql/schema.rs | 363 -
D src/db/schemas/postgresql/schema.rs | 363 -
M src/error.rs | 66 +-
M src/mail.rs | 197 +-
M src/main.rs | 559 +-
A src/priv_sep.rs | 85 +
M src/ratelimit.rs | 51 +-
M src/static/scripts/admin_diagnostics.js | 3 +-
M src/util.rs | 164 +-
D tools/global_domains.py | 81 -
218 files changed, 2994 insertions(+), 12496 deletions(-)

diff --git a/.dockerignore b/.dockerignore
@@ -1,40 +0,0 @@
-# Local build artifacts
-target
-
-# Data folder
-data
-
-# Misc
-.env
-.env.template
-.gitattributes
-.gitignore
-rustfmt.toml
-
-# IDE files
-.vscode
-.idea
-.editorconfig
-*.iml
-
-# Documentation
-.github
-*.md
-*.txt
-*.yml
-*.yaml
-
-# Docker
-hooks
-tools
-Dockerfile
-.dockerignore
-docker/**
-!docker/healthcheck.sh
-!docker/start.sh
-
-# Web vault
-web-vault
-
-# Vaultwarden Resources
-resources
diff --git a/.editorconfig b/.editorconfig
@@ -1,23 +0,0 @@
-# EditorConfig is awesome: https://EditorConfig.org
-
-# top-most EditorConfig file
-root = true
-
-[*]
-end_of_line = lf
-charset = utf-8
-
-[*.{rs,py}]
-indent_style = space
-indent_size = 4
-trim_trailing_whitespace = true
-insert_final_newline = true
-
-[*.{yml,yaml}]
-indent_style = space
-indent_size = 2
-trim_trailing_whitespace = true
-insert_final_newline = true
-
-[Makefile]
-indent_style = tab
diff --git a/.env.template b/.env.template
@@ -1,454 +0,0 @@
-# shellcheck disable=SC2034,SC2148
-## Vaultwarden Configuration File
-## Uncomment any of the following lines to change the defaults
-##
-## Be aware that most of these settings will be overridden if they were changed
-## in the admin interface. Those overrides are stored within DATA_FOLDER/config.json .
-##
-## By default, Vaultwarden expects for this file to be named ".env" and located
-## in the current working directory. If this is not the case, the environment
-## variable ENV_FILE can be set to the location of this file prior to starting
-## Vaultwarden.
-
-## Main data folder
-# DATA_FOLDER=data
-
-## Database URL
-## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
-# DATABASE_URL=data/db.sqlite3
-## When using MySQL, specify an appropriate connection URI.
-## Details: https://docs.diesel.rs/diesel/mysql/struct.MysqlConnection.html
-# DATABASE_URL=mysql://user:password@host[:port]/database_name
-## When using PostgreSQL, specify an appropriate connection URI (recommended)
-## or keyword/value connection string.
-## Details:
-## - https://docs.diesel.rs/diesel/pg/struct.PgConnection.html
-## - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING
-# DATABASE_URL=postgresql://user:password@host[:port]/database_name
-
-## Database max connections
-## Define the size of the connection pool used for connecting to the database.
-# DATABASE_MAX_CONNS=10
-
-## Database timeout
-## Timeout when acquiring database connection
-# DATABASE_TIMEOUT=30
-
-## Database connection initialization
-## Allows SQL statements to be run whenever a new database connection is created.
-## This is mainly useful for connection-scoped pragmas.
-## If empty, a database-specific default is used:
-## - SQLite: "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;"
-## - MySQL: ""
-## - PostgreSQL: ""
-# DATABASE_CONN_INIT=""
-
-## Individual folders, these override %DATA_FOLDER%
-# RSA_KEY_FILENAME=data/rsa_key
-# ICON_CACHE_FOLDER=data/icon_cache
-# ATTACHMENTS_FOLDER=data/attachments
-# SENDS_FOLDER=data/sends
-# TMP_FOLDER=data/tmp
-
-## Templates data folder, by default uses embedded templates
-## Check source code to see the format
-# TEMPLATES_FOLDER=/path/to/templates
-## Automatically reload the templates for every request, slow, use only for development
-# RELOAD_TEMPLATES=false
-
-## Client IP Header, used to identify the IP of the client, defaults to "X-Real-IP"
-## Set to the string "none" (without quotes), to disable any headers and just use the remote IP
-# IP_HEADER=X-Real-IP
-
-## Cache time-to-live for successfully obtained icons, in seconds (0 is "forever")
-# ICON_CACHE_TTL=2592000
-## Cache time-to-live for icons which weren't available, in seconds (0 is "forever")
-# ICON_CACHE_NEGTTL=259200
-
-## Web vault settings
-# WEB_VAULT_FOLDER=web-vault/
-# WEB_VAULT_ENABLED=true
-
-## Enables websocket notifications
-# WEBSOCKET_ENABLED=false
-
-## Controls the WebSocket server address and port
-# WEBSOCKET_ADDRESS=0.0.0.0
-# WEBSOCKET_PORT=3012
-
-## Enables push notifications (requires key and id from https://bitwarden.com/host)
-# PUSH_ENABLED=true
-# PUSH_INSTALLATION_ID=CHANGEME
-# PUSH_INSTALLATION_KEY=CHANGEME
-## Don't change this unless you know what you're doing.
-# PUSH_RELAY_URI=https://push.bitwarden.com
-
-## Controls whether users are allowed to create Bitwarden Sends.
-## This setting applies globally to all users.
-## To control this on a per-org basis instead, use the "Disable Send" org policy.
-# SENDS_ALLOWED=true
-
-## Controls whether users can enable emergency access to their accounts.
-## This setting applies globally to all users.
-# EMERGENCY_ACCESS_ALLOWED=true
-
-## Controls whether event logging is enabled for organizations
-## This setting applies to organizations.
-## Disabled by default. Also check the EVENT_CLEANUP_SCHEDULE and EVENTS_DAYS_RETAIN settings.
-# ORG_EVENTS_ENABLED=false
-
-## Controls whether users can change their email.
-## This setting applies globally to all users
-# EMAIL_CHANGE_ALLOWED=true
-
-## Number of days to retain events stored in the database.
-## If unset (the default), events are kept indefinitely and the scheduled job is disabled!
-# EVENTS_DAYS_RETAIN=
-
-## BETA FEATURE: Groups
-## Controls whether group support is enabled for organizations
-## This setting applies to organizations.
-## Disabled by default because this is a beta feature, it contains known issues!
-## KNOW WHAT YOU ARE DOING!
-# ORG_GROUPS_ENABLED=false
-
-## Job scheduler settings
-##
-## Job schedules use a cron-like syntax (as parsed by https://crates.io/crates/cron),
-## and are always in terms of UTC time (regardless of your local time zone settings).
-##
-## The schedule format is a bit different from crontab as crontab does not contains seconds.
-## You can test the the format here: https://crontab.guru, but remove the first digit!
-## SEC MIN HOUR DAY OF MONTH MONTH DAY OF WEEK
-## "0 30 9,12,15 1,15 May-Aug Mon,Wed,Fri"
-## "0 30 * * * * "
-## "0 30 1 * * * "
-##
-## How often (in ms) the job scheduler thread checks for jobs that need running.
-## Set to 0 to globally disable scheduled jobs.
-# JOB_POLL_INTERVAL_MS=30000
-##
-## Cron schedule of the job that checks for Sends past their deletion date.
-## Defaults to hourly (5 minutes after the hour). Set blank to disable this job.
-# SEND_PURGE_SCHEDULE="0 5 * * * *"
-##
-## Cron schedule of the job that checks for trashed items to delete permanently.
-## Defaults to daily (5 minutes after midnight). Set blank to disable this job.
-# TRASH_PURGE_SCHEDULE="0 5 0 * * *"
-##
-## Cron schedule of the job that checks for incomplete 2FA logins.
-## Defaults to once every minute. Set blank to disable this job.
-# INCOMPLETE_2FA_SCHEDULE="30 * * * * *"
-##
-## Cron schedule of the job that sends expiration reminders to emergency access grantors.
-## Defaults to hourly (3 minutes after the hour). Set blank to disable this job.
-# EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE="0 3 * * * *"
-##
-## Cron schedule of the job that grants emergency access requests that have met the required wait time.
-## Defaults to hourly (7 minutes after the hour). Set blank to disable this job.
-# EMERGENCY_REQUEST_TIMEOUT_SCHEDULE="0 7 * * * *"
-##
-## Cron schedule of the job that cleans old events from the event table.
-## Defaults to daily. Set blank to disable this job. Also without EVENTS_DAYS_RETAIN set, this job will not start.
-# EVENT_CLEANUP_SCHEDULE="0 10 0 * * *"
-
-## Enable extended logging, which shows timestamps and targets in the logs
-# EXTENDED_LOGGING=true
-
-## Timestamp format used in extended logging.
-## Format specifiers: https://docs.rs/chrono/latest/chrono/format/strftime
-# LOG_TIMESTAMP_FORMAT="%Y-%m-%d %H:%M:%S.%3f"
-
-## Logging to file
-# LOG_FILE=/path/to/log
-
-## Logging to Syslog
-## This requires extended logging
-# USE_SYSLOG=false
-
-## Log level
-## Change the verbosity of the log output
-## Valid values are "trace", "debug", "info", "warn", "error" and "off"
-## Setting it to "trace" or "debug" would also show logs for mounted
-## routes and static file, websocket and alive requests
-# LOG_LEVEL=Info
-
-## Enable WAL for the DB
-## Set to false to avoid enabling WAL during startup.
-## Note that if the DB already has WAL enabled, you will also need to disable WAL in the DB,
-## this setting only prevents Vaultwarden from automatically enabling it on start.
-## Please read project wiki page about this setting first before changing the value as it can
-## cause performance degradation or might render the service unable to start.
-# ENABLE_DB_WAL=true
-
-## Database connection retries
-## Number of times to retry the database connection during startup, with 1 second delay between each retry, set to 0 to retry indefinitely
-# DB_CONNECTION_RETRIES=15
-
-## Icon service
-## The predefined icon services are: internal, bitwarden, duckduckgo, google.
-## To specify a custom icon service, set a URL template with exactly one instance of `{}`,
-## which is replaced with the domain. For example: `https://icon.example.com/domain/{}`.
-##
-## `internal` refers to Vaultwarden's built-in icon fetching implementation.
-## If an external service is set, an icon request to Vaultwarden will return an HTTP
-## redirect to the corresponding icon at the external service. An external service may
-## be useful if your Vaultwarden instance has no external network connectivity, or if
-## you are concerned that someone may probe your instance to try to detect whether icons
-## for certain sites have been cached.
-# ICON_SERVICE=internal
-
-## Icon redirect code
-## The HTTP status code to use for redirects to an external icon service.
-## The supported codes are 301 (legacy permanent), 302 (legacy temporary), 307 (temporary), and 308 (permanent).
-## Temporary redirects are useful while testing different icon services, but once a service
-## has been decided on, consider using permanent redirects for cacheability. The legacy codes
-## are currently better supported by the Bitwarden clients.
-# ICON_REDIRECT_CODE=302
-
-## Disable icon downloading
-## Set to true to disable icon downloading in the internal icon service.
-## This still serves existing icons from $ICON_CACHE_FOLDER, without generating any external
-## network requests. $ICON_CACHE_TTL must also be set to 0; otherwise, the existing icons
-## will be deleted eventually, but won't be downloaded again.
-# DISABLE_ICON_DOWNLOAD=false
-
-## Icon download timeout
-## Configure the timeout value when downloading the favicons.
-## The default is 10 seconds, but this could be to low on slower network connections
-# ICON_DOWNLOAD_TIMEOUT=10
-
-## Icon blacklist Regex
-## Any domains or IPs that match this regex won't be fetched by the icon service.
-## Useful to hide other servers in the local network. Check the WIKI for more details
-## NOTE: Always enclose this regex withing single quotes!
-# ICON_BLACKLIST_REGEX='^(192\.168\.0\.[0-9]+|192\.168\.1\.[0-9]+)$'
-
-## Any IP which is not defined as a global IP will be blacklisted.
-## Useful to secure your internal environment: See https://en.wikipedia.org/wiki/Reserved_IP_addresses for a list of IPs which it will block
-# ICON_BLACKLIST_NON_GLOBAL_IPS=true
-
-## Disable 2FA remember
-## Enabling this would force the users to use a second factor to login every time.
-## Note that the checkbox would still be present, but ignored.
-# DISABLE_2FA_REMEMBER=false
-
-## Maximum attempts before an email token is reset and a new email will need to be sent.
-# EMAIL_ATTEMPTS_LIMIT=3
-
-## Token expiration time
-## Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
-# EMAIL_EXPIRATION_TIME=600
-
-## Email token size
-## Number of digits in an email 2FA token (min: 6, max: 255).
-## Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting!
-# EMAIL_TOKEN_SIZE=6
-
-## Controls if new users can register
-# SIGNUPS_ALLOWED=true
-
-## Controls if new users need to verify their email address upon registration
-## Note that setting this option to true prevents logins until the email address has been verified!
-## The welcome email will include a verification link, and login attempts will periodically
-## trigger another verification email to be sent.
-# SIGNUPS_VERIFY=false
-
-## If SIGNUPS_VERIFY is set to true, this limits how many seconds after the last time
-## an email verification link has been sent another verification email will be sent
-# SIGNUPS_VERIFY_RESEND_TIME=3600
-
-## If SIGNUPS_VERIFY is set to true, this limits how many times an email verification
-## email will be re-sent upon an attempted login.
-# SIGNUPS_VERIFY_RESEND_LIMIT=6
-
-## Controls if new users from a list of comma-separated domains can register
-## even if SIGNUPS_ALLOWED is set to false
-# SIGNUPS_DOMAINS_WHITELIST=example.com,example.net,example.org
-
-## Controls which users can create new orgs.
-## Blank or 'all' means all users can create orgs (this is the default):
-# ORG_CREATION_USERS=
-## 'none' means no users can create orgs:
-# ORG_CREATION_USERS=none
-## A comma-separated list means only those users can create orgs:
-# ORG_CREATION_USERS=admin1@example.com,admin2@example.com
-
-## Token for the admin interface, preferably an Argon2 PCH string
-## Vaultwarden has a built-in generator by calling `vaultwarden hash`
-## For details see: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token
-## If not set, the admin panel is disabled
-## New Argon2 PHC string
-## Note that for some environments, like docker-compose you need to escape all the dollar signs `$` with an extra dollar sign like `$$`
-## Also, use single quotes (') instead of double quotes (") to enclose the string when needed
-# ADMIN_TOKEN='$argon2id$v=19$m=65540,t=3,p=4$MmeKRnGK5RW5mJS7h3TOL89GrpLPXJPAtTK8FTqj9HM$DqsstvoSAETl9YhnsXbf43WeaUwJC6JhViIvuPoig78'
-## Old plain text string (Will generate warnings in favor of Argon2)
-# ADMIN_TOKEN=Vy2VyYTTsKPv8W5aEOWUbB/Bt3DEKePbHmI4m9VcemUMS2rEviDowNAFqYi1xjmp
-
-## Enable this to bypass the admin panel security. This option is only
-## meant to be used with the use of a separate auth layer in front
-# DISABLE_ADMIN_TOKEN=false
-
-## Invitations org admins to invite users, even when signups are disabled
-# INVITATIONS_ALLOWED=true
-## Name shown in the invitation emails that don't come from a specific organization
-# INVITATION_ORG_NAME=Vaultwarden
-
-## The number of hours after which an organization invite token, emergency access invite token,
-## email verification token and deletion request token will expire (must be at least 1)
-# INVITATION_EXPIRATION_HOURS=120
-
-## Per-organization attachment storage limit (KB)
-## Max kilobytes of attachment storage allowed per organization.
-## When this limit is reached, organization members will not be allowed to upload further attachments for ciphers owned by that organization.
-# ORG_ATTACHMENT_LIMIT=
-## Per-user attachment storage limit (KB)
-## Max kilobytes of attachment storage allowed per user.
-## When this limit is reached, the user will not be allowed to upload further attachments.
-# USER_ATTACHMENT_LIMIT=
-
-## Number of days to wait before auto-deleting a trashed item.
-## If unset (the default), trashed items are not auto-deleted.
-## This setting applies globally, so make sure to inform all users of any changes to this setting.
-# TRASH_AUTO_DELETE_DAYS=
-
-## Number of minutes to wait before a 2FA-enabled login is considered incomplete,
-## resulting in an email notification. An incomplete 2FA login is one where the correct
-## master password was provided but the required 2FA step was not completed, which
-## potentially indicates a master password compromise. Set to 0 to disable this check.
-## This setting applies globally to all users.
-# INCOMPLETE_2FA_TIME_LIMIT=3
-
-## Number of server-side passwords hashing iterations for the password hash.
-## The default for new users. If changed, it will be updated during login for existing users.
-# PASSWORD_ITERATIONS=350000
-
-## Controls whether users can set password hints. This setting applies globally to all users.
-# PASSWORD_HINTS_ALLOWED=true
-
-## Controls whether a password hint should be shown directly in the web page if
-## SMTP service is not configured. Not recommended for publicly-accessible instances
-## as this provides unauthenticated access to potentially sensitive data.
-# SHOW_PASSWORD_HINT=false
-
-## Domain settings
-## The domain must match the address from where you access the server
-## It's recommended to configure this value, otherwise certain functionality might not work,
-## like attachment downloads, email links and U2F.
-## For U2F to work, the server must use HTTPS, you can use Let's Encrypt for free certs
-# DOMAIN=https://vw.domain.tld:8443
-
-## Allowed iframe ancestors (Know the risks!)
-## https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/frame-ancestors
-## Allows other domains to embed the web vault into an iframe, useful for embedding into secure intranets
-## This adds the configured value to the 'Content-Security-Policy' headers 'frame-ancestors' value.
-## Multiple values must be separated with a whitespace.
-# ALLOWED_IFRAME_ANCESTORS=
-
-## Number of seconds, on average, between login requests from the same IP address before rate limiting kicks in.
-# LOGIN_RATELIMIT_SECONDS=60
-## Allow a burst of requests of up to this size, while maintaining the average indicated by `LOGIN_RATELIMIT_SECONDS`.
-## Note that this applies to both the login and the 2FA, so it's recommended to allow a burst size of at least 2.
-# LOGIN_RATELIMIT_MAX_BURST=10
-
-## Number of seconds, on average, between admin login requests from the same IP address before rate limiting kicks in.
-# ADMIN_RATELIMIT_SECONDS=300
-## Allow a burst of requests of up to this size, while maintaining the average indicated by `ADMIN_RATELIMIT_SECONDS`.
-# ADMIN_RATELIMIT_MAX_BURST=3
-
-## Set the lifetime of admin sessions to this value (in minutes).
-# ADMIN_SESSION_LIFETIME=20
-
-## Yubico (Yubikey) Settings
-## Set your Client ID and Secret Key for Yubikey OTP
-## You can generate it here: https://upgrade.yubico.com/getapikey/
-## You can optionally specify a custom OTP server
-# YUBICO_CLIENT_ID=11111
-# YUBICO_SECRET_KEY=AAAAAAAAAAAAAAAAAAAAAAAA
-# YUBICO_SERVER=http://yourdomain.com/wsapi/2.0/verify
-
-## Duo Settings
-## You need to configure all options to enable global Duo support, otherwise users would need to configure it themselves
-## Create an account and protect an application as mentioned in this link (only the first step, not the rest):
-## https://help.bitwarden.com/article/setup-two-step-login-duo/#create-a-duo-security-account
-## Then set the following options, based on the values obtained from the last step:
-# DUO_IKEY=<Integration Key>
-# DUO_SKEY=<Secret Key>
-# DUO_HOST=<API Hostname>
-## After that, you should be able to follow the rest of the guide linked above,
-## ignoring the fields that ask for the values that you already configured beforehand.
-
-## Authenticator Settings
-## Disable authenticator time drifted codes to be valid.
-## TOTP codes of the previous and next 30 seconds will be invalid
-##
-## According to the RFC6238 (https://tools.ietf.org/html/rfc6238),
-## we allow by default the TOTP code which was valid one step back and one in the future.
-## This can however allow attackers to be a bit more lucky with there attempts because there are 3 valid codes.
-## You can disable this, so that only the current TOTP Code is allowed.
-## Keep in mind that when a sever drifts out of time, valid codes could be marked as invalid.
-## In any case, if a code has been used it can not be used again, also codes which predates it will be invalid.
-# AUTHENTICATOR_DISABLE_TIME_DRIFT=false
-
-## Rocket specific settings
-## See https://rocket.rs/v0.4/guide/configuration/ for more details.
-# ROCKET_ADDRESS=0.0.0.0
-# ROCKET_PORT=80 # Defaults to 80 in the Docker images, or 8000 otherwise.
-# ROCKET_WORKERS=10
-# ROCKET_TLS={certs="/path/to/certs.pem",key="/path/to/key.pem"}
-
-## Mail specific settings, set SMTP_FROM and either SMTP_HOST or USE_SENDMAIL to enable the mail service.
-## To make sure the email links are pointing to the correct host, set the DOMAIN variable.
-## Note: if SMTP_USERNAME is specified, SMTP_PASSWORD is mandatory
-# SMTP_HOST=smtp.domain.tld
-# SMTP_FROM=vaultwarden@domain.tld
-# SMTP_FROM_NAME=Vaultwarden
-# SMTP_SECURITY=starttls # ("starttls", "force_tls", "off") Enable a secure connection. Default is "starttls" (Explicit - ports 587 or 25), "force_tls" (Implicit - port 465) or "off", no encryption (port 25)
-# SMTP_PORT=587 # Ports 587 (submission) and 25 (smtp) are standard without encryption and with encryption via STARTTLS (Explicit TLS). Port 465 (submissions) is used for encrypted submission (Implicit TLS).
-# SMTP_USERNAME=username
-# SMTP_PASSWORD=password
-# SMTP_TIMEOUT=15
-
-# Whether to send mail via the `sendmail` command
-# USE_SENDMAIL=false
-# Which sendmail command to use. The one found in the $PATH is used if not specified.
-# SENDMAIL_COMMAND="/path/to/sendmail"
-
-## Defaults for SSL is "Plain" and "Login" and nothing for Non-SSL connections.
-## Possible values: ["Plain", "Login", "Xoauth2"].
-## Multiple options need to be separated by a comma ','.
-# SMTP_AUTH_MECHANISM="Plain"
-
-## Server name sent during the SMTP HELO
-## By default this value should be is on the machine's hostname,
-## but might need to be changed in case it trips some anti-spam filters
-# HELO_NAME=
-
-## Embed images as email attachments
-# SMTP_EMBED_IMAGES=false
-
-## SMTP debugging
-## When set to true this will output very detailed SMTP messages.
-## WARNING: This could contain sensitive information like passwords and usernames! Only enable this during troubleshooting!
-# SMTP_DEBUG=false
-
-## Accept Invalid Hostnames
-## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
-## Only use this as a last resort if you are not able to use a valid certificate.
-# SMTP_ACCEPT_INVALID_HOSTNAMES=false
-
-## Accept Invalid Certificates
-## DANGEROUS: This option introduces significant vulnerabilities to man-in-the-middle attacks!
-## Only use this as a last resort if you are not able to use a valid certificate.
-## If the Certificate is valid but the hostname doesn't match, please use SMTP_ACCEPT_INVALID_HOSTNAMES instead.
-# SMTP_ACCEPT_INVALID_CERTS=false
-
-## Require new device emails. When a user logs in an email is required to be sent.
-## If sending the email fails the login attempt will fail!!
-# REQUIRE_DEVICE_EMAIL=false
-
-## HIBP Api Key
-## HaveIBeenPwned API Key, request it here: https://haveibeenpwned.com/API/Key
-# HIBP_API_KEY=
-
-# vim: syntax=ini
diff --git a/.gitattributes b/.gitattributes
@@ -1,3 +0,0 @@
-# Ignore vendored scripts in GitHub stats
-src/static/scripts/* linguist-vendored
-
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
@@ -1,3 +0,0 @@
-github: dani-garcia
-liberapay: dani-garcia
-custom: ["https://paypal.me/DaniGG"]
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -1,66 +0,0 @@
----
-name: Bug report
-about: Use this ONLY for bugs in vaultwarden itself. Use the Discourse forum (link below) to request features or get help with usage/configuration. If in doubt, use the forum.
-title: ''
-labels: ''
-assignees: ''
-
----
-<!--
-  # ###
-  NOTE: Please update to the latest version of vaultwarden before reporting an issue!
-  This saves you and us a lot of time and troubleshooting.
-  See:
-  * https://github.com/dani-garcia/vaultwarden/issues/1180
-  * https://github.com/dani-garcia/vaultwarden/wiki/Updating-the-vaultwarden-image
-  # ###
--->
-
-<!--
-Please fill out the following template to make solving your problem easier and faster for us.
-This is only a guideline. If you think that parts are unnecessary for your issue, feel free to remove them.
-
-Remember to hide/redact personal or confidential information,
-such as passwords, IP addresses, and DNS names as appropriate.
--->
-
-### Subject of the issue
-<!-- Describe your issue here. -->
-
-### Deployment environment
-
-<!--
-  =========================================================================================
-  Preferably, use the `Generate Support String` button on the admin page's Diagnostics tab.
-  That will auto-generate most of the info requested in this section.
-  =========================================================================================
--->
-
-<!-- The version number, obtained from the logs (at startup) or the admin diagnostics page -->
-<!-- This is NOT the version number shown on the web vault, which is versioned separately from vaultwarden -->
-<!-- Remember to check if your issue exists on the latest version first! -->
-* vaultwarden version:
-
-<!-- How the server was installed: Docker image, OS package, built from source, etc. -->
-* Install method:
-
-* Clients used: <!-- web vault, desktop, Android, iOS, etc. (if applicable) -->
-
-* Reverse proxy and version: <!-- if applicable -->
-
-* MySQL/MariaDB or PostgreSQL version: <!-- if applicable -->
-
-* Other relevant details:
-
-### Steps to reproduce
-<!-- Tell us how to reproduce this issue. What parameters did you set (differently from the defaults)
-and how did you start vaultwarden? -->
-
-### Expected behaviour
-<!-- Tell us what you expected to happen -->
-
-### Actual behaviour
-<!-- Tell us what actually happened -->
-
-### Troubleshooting data
-<!-- Share any log files, screenshots, or other relevant troubleshooting data -->
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
@@ -1,8 +0,0 @@
-blank_issues_enabled: false
-contact_links:
-  - name: Discourse forum for vaultwarden
-    url: https://vaultwarden.discourse.group/
-    about: Use this forum to request features or get help with usage/configuration.
-  - name: GitHub Discussions for vaultwarden
-    url: https://github.com/dani-garcia/vaultwarden/discussions
-    about: An alternative to the Discourse forum, if this is easier for you.
diff --git a/.github/security-contact.gif b/.github/security-contact.gif
Binary files differ.
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
@@ -1,197 +0,0 @@
-name: Build
-
-on:
-  push:
-    paths:
-      - ".github/workflows/build.yml"
-      - "src/**"
-      - "migrations/**"
-      - "Cargo.*"
-      - "build.rs"
-      - "rust-toolchain.toml"
-      - "rustfmt.toml"
-      - "diesel.toml"
-      - "docker/Dockerfile.j2"
-      - "docker/DockerSettings.yaml"
-  pull_request:
-    paths:
-      - ".github/workflows/build.yml"
-      - "src/**"
-      - "migrations/**"
-      - "Cargo.*"
-      - "build.rs"
-      - "rust-toolchain.toml"
-      - "rustfmt.toml"
-      - "diesel.toml"
-      - "docker/Dockerfile.j2"
-      - "docker/DockerSettings.yaml"
-
-jobs:
-  build:
-    runs-on: ubuntu-22.04
-    timeout-minutes: 120
-    # Make warnings errors, this is to prevent warnings slipping through.
-    # This is done globally to prevent rebuilds when the RUSTFLAGS env variable changes.
-    env:
-      RUSTFLAGS: "-D warnings"
-    strategy:
-      fail-fast: false
-      matrix:
-        channel:
-          - "rust-toolchain" # The version defined in rust-toolchain
-          - "msrv" # The supported MSRV
-
-    name: Build and Test ${{ matrix.channel }}
-
-    steps:
-      # Checkout the repo
-      - name: "Checkout"
-        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
-      # End Checkout the repo
-
-      # Install dependencies
-      - name: "Install dependencies Ubuntu"
-        run: sudo apt-get update && sudo apt-get install -y --no-install-recommends openssl build-essential libmariadb-dev-compat libpq-dev libssl-dev pkg-config
-      # End Install dependencies
-
-      # Determine rust-toolchain version
-      - name: Init Variables
-        id: toolchain
-        shell: bash
-        run: |
-          if [[ "${{ matrix.channel }}" == 'rust-toolchain' ]]; then
-            RUST_TOOLCHAIN="$(grep -oP 'channel.*"(\K.*?)(?=")' rust-toolchain.toml)"
-          elif [[ "${{ matrix.channel }}" == 'msrv' ]]; then
-            RUST_TOOLCHAIN="$(grep -oP 'rust-version.*"(\K.*?)(?=")' Cargo.toml)"
-          else
-            RUST_TOOLCHAIN="${{ matrix.channel }}"
-          fi
-          echo "RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" | tee -a "${GITHUB_OUTPUT}"
-      # End Determine rust-toolchain version
-
-      # Only install the clippy and rustfmt components on the default rust-toolchain
-      - name: "Install rust-toolchain version"
-        uses: dtolnay/rust-toolchain@439cf607258077187679211f12aa6f19af4a0af7 # master @ 2023-09-19 - 05:31 PM GMT+2
-        if: ${{ matrix.channel == 'rust-toolchain' }}
-        with:
-          toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
-          components: clippy, rustfmt
-      # End Uses the rust-toolchain file to determine version
-
-      # Install the any other channel to be used for which we do not execute clippy and rustfmt
-      - name: "Install MSRV version"
-        uses: dtolnay/rust-toolchain@439cf607258077187679211f12aa6f19af4a0af7 # master @ 2023-09-19 - 05:31 PM GMT+2
-        if: ${{ matrix.channel != 'rust-toolchain' }}
-        with:
-          toolchain: "${{steps.toolchain.outputs.RUST_TOOLCHAIN}}"
-      # End Install the MSRV channel to be used
-
-      # Set the current matrix toolchain version as default
-      - name: "Set toolchain ${{steps.toolchain.outputs.RUST_TOOLCHAIN}} as default"
-        run: |
-          # Remove the rust-toolchain.toml
-          rm rust-toolchain.toml
-          # Set the default
-          rustup default ${{steps.toolchain.outputs.RUST_TOOLCHAIN}}
-
-      # Show environment
-      - name: "Show environment"
-        run: |
-          rustc -vV
-          cargo -vV
-      # End Show environment
-
-      # Enable Rust Caching
-      - uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # v2.7.0
-        with:
-          # Use a custom prefix-key to force a fresh start. This is sometimes needed with bigger changes.
-          # Like changing the build host from Ubuntu 20.04 to 22.04 for example.
-          # Only update when really needed! Use a <year>.<month>[.<inc>] format.
-          prefix-key: "v2023.07-rust"
-      # End Enable Rust Caching
-
-      # Run cargo tests
-      # First test all features together, afterwards test them separately.
- - name: "test features: sqlite,mysql,postgresql,enable_mimalloc" - id: test_sqlite_mysql_postgresql_mimalloc - if: $${{ always() }} - run: | - cargo test --features sqlite,mysql,postgresql,enable_mimalloc - - - name: "test features: sqlite,mysql,postgresql" - id: test_sqlite_mysql_postgresql - if: $${{ always() }} - run: | - cargo test --features sqlite,mysql,postgresql - - - name: "test features: sqlite" - id: test_sqlite - if: $${{ always() }} - run: | - cargo test --features sqlite - - - name: "test features: mysql" - id: test_mysql - if: $${{ always() }} - run: | - cargo test --features mysql - - - name: "test features: postgresql" - id: test_postgresql - if: $${{ always() }} - run: | - cargo test --features postgresql - # End Run cargo tests - - - # Run cargo clippy, and fail on warnings - - name: "clippy features: sqlite,mysql,postgresql,enable_mimalloc" - id: clippy - if: ${{ always() && matrix.channel == 'rust-toolchain' }} - run: | - cargo clippy --features sqlite,mysql,postgresql,enable_mimalloc -- -D warnings - # End Run cargo clippy - - - # Run cargo fmt (Only run on rust-toolchain defined version) - - name: "check formatting" - id: formatting - if: ${{ always() && matrix.channel == 'rust-toolchain' }} - run: | - cargo fmt --all -- --check - # End Run cargo fmt - - - # Check for any previous failures, if there are stop, else continue. - # This is useful so all test/clippy/fmt actions are done, and they can all be addressed - - name: "Some checks failed" - if: ${{ failure() }} - run: | - echo "### :x: Checks Failed!" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "|Job|Status|" >> $GITHUB_STEP_SUMMARY - echo "|---|------|" >> $GITHUB_STEP_SUMMARY - echo "|test (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.test_sqlite_mysql_postgresql_mimalloc.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "|test (sqlite,mysql,postgresql)|${{ steps.test_sqlite_mysql_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "|test (sqlite)|${{ steps.test_sqlite.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "|test (mysql)|${{ steps.test_mysql.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "|test (postgresql)|${{ steps.test_postgresql.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "|clippy (sqlite,mysql,postgresql,enable_mimalloc)|${{ steps.clippy.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "|fmt|${{ steps.formatting.outcome }}|" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "Please check the failed jobs and fix where needed." >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - exit 1 - - - # Check for any previous failures, if there are stop, else continue. - # This is useful so all test/clippy/fmt actions are done, and they can all be addressed - - name: "All checks passed" - if: ${{ success() }} - run: | - echo "### :tada: Checks Passed!" 
>> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/hadolint.yml b/.github/workflows/hadolint.yml @@ -1,33 +0,0 @@ -name: Hadolint - -on: [ - push, - pull_request - ] - -jobs: - hadolint: - name: Validate Dockerfile syntax - runs-on: ubuntu-22.04 - timeout-minutes: 30 - steps: - # Checkout the repo - - name: Checkout - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - # End Checkout the repo - - # Download hadolint - https://github.com/hadolint/hadolint/releases - - name: Download hadolint - shell: bash - run: | - sudo curl -L https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VERSION}/hadolint-$(uname -s)-$(uname -m) -o /usr/local/bin/hadolint && \ - sudo chmod +x /usr/local/bin/hadolint - env: - HADOLINT_VERSION: 2.12.0 - # End Download hadolint - - # Test Dockerfiles - - name: Run hadolint - shell: bash - run: hadolint docker/Dockerfile.{debian,alpine} - # End Test Dockerfiles diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml @@ -1,170 +0,0 @@ -name: Release - -on: - push: - paths: - - ".github/workflows/release.yml" - - "src/**" - - "migrations/**" - - "docker/**" - - "Cargo.*" - - "build.rs" - - "diesel.toml" - - "rust-toolchain.toml" - - branches: # Only on paths above - - main - - release-build-revision - - tags: # Always, regardless of paths above - - '*' - -jobs: - # https://github.com/marketplace/actions/skip-duplicate-actions - # Some checks to determine if we need to continue with building a new docker. - # We will skip this check if we are creating a tag, because that has the same hash as a previous run already. - skip_check: - runs-on: ubuntu-22.04 - if: ${{ github.repository == 'dani-garcia/vaultwarden' }} - outputs: - should_skip: ${{ steps.skip_check.outputs.should_skip }} - steps: - - name: Skip Duplicates Actions - id: skip_check - uses: fkirc/skip-duplicate-actions@12aca0a884f6137d619d6a8a09fcc3406ced5281 # v5.3.0 - with: - cancel_others: 'true' - # Only run this when not creating a tag - if: ${{ github.ref_type == 'branch' }} - - docker-build: - runs-on: ubuntu-22.04 - timeout-minutes: 120 - needs: skip_check - if: ${{ needs.skip_check.outputs.should_skip != 'true' && github.repository == 'dani-garcia/vaultwarden' }} - # TODO: Start a local docker registry to be used to extract the final Alpine static build images - # services: - # registry: - # image: registry:2 - # ports: - # - 5000:5000 - env: - SOURCE_COMMIT: ${{ github.sha }} - SOURCE_REPOSITORY_URL: "https://github.com/${{ github.repository }}" - # The *_REPO variables need to be configured as repository variables - # Append `/settings/variables/actions` to your repo url - # DOCKERHUB_REPO needs to be 'index.docker.io/<user>/<repo>' - # Check for Docker hub credentials in secrets - HAVE_DOCKERHUB_LOGIN: ${{ vars.DOCKERHUB_REPO != '' && secrets.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }} - # GHCR_REPO needs to be 'ghcr.io/<user>/<repo>' - # Check for Github credentials in secrets - HAVE_GHCR_LOGIN: ${{ vars.GHCR_REPO != '' && github.repository_owner != '' && secrets.GITHUB_TOKEN != '' }} - # QUAY_REPO needs to be 'quay.io/<user>/<repo>' - # Check for Quay.io credentials in secrets - HAVE_QUAY_LOGIN: ${{ vars.QUAY_REPO != '' && secrets.QUAY_USERNAME != '' && secrets.QUAY_TOKEN != '' }} - strategy: - matrix: - base_image: ["debian","alpine"] - - steps: - # Checkout the repo - - name: Checkout - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 - with: - fetch-depth: 0 - - - 
name: Initialize QEMU binfmt support - uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0 - with: - platforms: "arm64,arm" - - # Start Docker Buildx - - name: Setup Docker Buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 - # https://github.com/moby/buildkit/issues/3969 - # Also set max parallelism to 2, the default of 4 breaks GitHub Actions - with: - config-inline: | - [worker.oci] - max-parallelism = 2 - driver-opts: | - network=host - - # Determine Base Tags and Source Version - - name: Determine Base Tags and Source Version - shell: bash - run: | - # Check which main tag we are going to build determined by github.ref_type - if [[ "${{ github.ref_type }}" == "tag" ]]; then - echo "BASE_TAGS=latest,${GITHUB_REF#refs/*/}" | tee -a "${GITHUB_ENV}" - elif [[ "${{ github.ref_type }}" == "branch" ]]; then - echo "BASE_TAGS=testing" | tee -a "${GITHUB_ENV}" - fi - - # Get the Source Version for this release - GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null || true)" - if [[ -n "${GIT_EXACT_TAG}" ]]; then - echo "SOURCE_VERSION=${GIT_EXACT_TAG}" | tee -a "${GITHUB_ENV}" - else - GIT_LAST_TAG="$(git describe --tags --abbrev=0)" - echo "SOURCE_VERSION=${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}" | tee -a "${GITHUB_ENV}" - fi - # End Determine Base Tags - - # Login to Docker Hub - - name: Login to Docker Hub - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }} - - - name: Add registry for DockerHub - if: ${{ env.HAVE_DOCKERHUB_LOGIN == 'true' }} - shell: bash - run: | - echo "CONTAINER_REGISTRIES=${{ vars.DOCKERHUB_REPO }}" | tee -a "${GITHUB_ENV}" - - # Login to GitHub Container Registry - - name: Login to GitHub Container Registry - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - if: ${{ env.HAVE_GHCR_LOGIN == 'true' }} - - - name: Add registry for ghcr.io - if: ${{ env.HAVE_GHCR_LOGIN == 'true' }} - shell: bash - run: | - echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.GHCR_REPO }}" | tee -a "${GITHUB_ENV}" - - # Login to Quay.io - - name: Login to Quay.io - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 - with: - registry: quay.io - username: ${{ secrets.QUAY_USERNAME }} - password: ${{ secrets.QUAY_TOKEN }} - if: ${{ env.HAVE_QUAY_LOGIN == 'true' }} - - - name: Add registry for Quay.io - if: ${{ env.HAVE_QUAY_LOGIN == 'true' }} - shell: bash - run: | - echo "CONTAINER_REGISTRIES=${CONTAINER_REGISTRIES:+${CONTAINER_REGISTRIES},}${{ vars.QUAY_REPO }}" | tee -a "${GITHUB_ENV}" - - - name: Bake ${{ matrix.base_image }} containers - uses: docker/bake-action@511fde2517761e303af548ec9e0ea74a8a100112 # v4.0.0 - env: - BASE_TAGS: "${{ env.BASE_TAGS }}" - SOURCE_COMMIT: "${{ env.SOURCE_COMMIT }}" - SOURCE_VERSION: "${{ env.SOURCE_VERSION }}" - SOURCE_REPOSITORY_URL: "${{ env.SOURCE_REPOSITORY_URL }}" - CONTAINER_REGISTRIES: "${{ env.CONTAINER_REGISTRIES }}" - with: - pull: true - push: true - files: docker/docker-bake.hcl - targets: "${{ matrix.base_image }}-multi" diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml @@ -1,43 +0,0 @@ -name: trivy - -on: - push: - branches: - - main - - release-build-revision - tags: - - '*' 
-  pull_request:
-    branches: [ "main" ]
-  schedule:
-    - cron: '00 12 * * *'
-
-permissions:
-  contents: read
-
-jobs:
-  trivy-scan:
-    name: Check
-    runs-on: ubuntu-22.04
-    timeout-minutes: 30
-    permissions:
-      contents: read
-      security-events: write
-      actions: read
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1
-
-      - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@f78e9ecf42a1271402d4f484518b9313235990e1 # v0.13.1
-        with:
-          scan-type: repo
-          ignore-unfixed: true
-          format: sarif
-          output: trivy-results.sarif
-          severity: CRITICAL,HIGH
-
-      - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@bad341350a2f5616f9e048e51360cedc49181ce8 # v2.22.4
-        with:
-          sarif_file: 'trivy-results.sarif'
diff --git a/.gitignore b/.gitignore
@@ -1,16 +1,2 @@
-# Local build artifacts
-target
-
-# Data folder
-data
-
-# IDE files
-.vscode
-.idea
-*.iml
-
-# Environment file
-.env
-
-# Web vault
-web-vault
+Cargo.lock
+target/**
diff --git a/.hadolint.yaml b/.hadolint.yaml
@@ -1,13 +0,0 @@
-ignored:
-  # To prevent issues and make clear some images only work on linux/amd64, we ignore this
-  - DL3029
-  # disable explicit version for apt install
-  - DL3008
-  # disable explicit version for apk install
-  - DL3018
-  # Ignore shellcheck info message
-  - SC1091
-trustedRegistries:
-  - docker.io
-  - ghcr.io
-  - quay.io
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
@@ -1,44 +0,0 @@
----
-repos:
-- repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v4.5.0
-  hooks:
-  - id: check-yaml
-  - id: check-json
-  - id: check-toml
-  - id: mixed-line-ending
-    args: ["--fix=no"]
-  - id: end-of-file-fixer
-    exclude: "(.*js$|.*css$)"
-  - id: check-case-conflict
-  - id: check-merge-conflict
-  - id: detect-private-key
-  - id: check-symlinks
-  - id: forbid-submodules
-- repo: local
-  hooks:
-  - id: fmt
-    name: fmt
-    description: Format files with cargo fmt.
-    entry: cargo fmt
-    language: system
-    types: [rust]
-    args: ["--", "--check"]
-  - id: cargo-test
-    name: cargo test
-    description: Test the package for errors.
-    entry: cargo test
-    language: system
-    args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--"]
-    types_or: [rust, file]
-    files: (Cargo.toml|Cargo.lock|rust-toolchain|.*\.rs$)
-    pass_filenames: false
-  - id: cargo-clippy
-    name: cargo clippy
-    description: Lint Rust sources
-    entry: cargo clippy
-    language: system
-    args: ["--features", "sqlite,mysql,postgresql,enable_mimalloc", "--", "-D", "warnings"]
-    types_or: [rust, file]
-    files: (Cargo.toml|Cargo.lock|rust-toolchain|clippy.toml|.*\.rs$)
-    pass_filenames: false
diff --git a/Cargo.lock b/Cargo.lock
@@ -19,24 +19,14 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"

[[package]]
name = "ahash"
-version = "0.7.6"
+version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
-dependencies = [
- "getrandom",
- "once_cell",
- "version_check",
-]
-
-[[package]]
-name = "ahash"
-version = "0.8.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f"
+checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a"
dependencies = [
 "cfg-if",
 "once_cell",
 "version_check",
+ "zerocopy",
]

[[package]]
@@ -49,21 +39,6 @@ dependencies = [
]

[[package]]
-name = "alloc-no-stdlib"
-version = "2.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3"
-
-[[package]]
-name = "alloc-stdlib"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece"
-dependencies = [
- "alloc-no-stdlib",
-]
-
-[[package]]
name = "allocator-api2"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -85,18 +60,6 @@ dependencies = [
]

[[package]]
-name = "argon2"
-version = "0.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17ba4cac0a46bc1d2912652a751c47f2a9f3a7fe89bcae2275d418f5270402f9"
-dependencies = [
- "base64ct",
- "blake2",
- "cpufeatures",
- "password-hash",
-]
-
-[[package]]
name = "async-channel"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -108,30 +71,29 @@ dependencies = [
]

[[package]]
-name = "async-compression"
-version = "0.4.4"
+name = "async-channel"
+version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f658e2baef915ba0f26f1f7c42bfb8e12f532a01f449a090ded75ae7a07e9ba2"
+checksum = "d37875bd9915b7d67c2f117ea2c30a0989874d0b2cb694fe25403c85763c0c9e"
dependencies = [
- "brotli",
- "flate2",
+ "concurrent-queue",
+ "event-listener 3.1.0",
+ "event-listener-strategy",
 "futures-core",
- "memchr",
 "pin-project-lite",
- "tokio",
]

[[package]]
name = "async-executor"
-version = "1.6.0"
+version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4b0c4a4f319e45986f347ee47fef8bf5e81c9abc3f6f58dc2391439f30df65f0"
+checksum = "0de517d5a758a65a16d18d8f605e7a6beed477444cca270116af40fd3cd59d27"
dependencies = [
- "async-lock",
+ "async-lock 3.1.0",
 "async-task",
 "concurrent-queue",
 "fastrand 2.0.1",
- "futures-lite",
+ "futures-lite 2.0.1",
 "slab",
]

@@ -141,12 +103,12 @@ version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776"
dependencies = [
- "async-channel", + "async-channel 1.9.0", "async-executor", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "blocking", - "futures-lite", + "futures-lite 1.13.0", "once_cell", ] @@ -156,21 +118,41 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ - "async-lock", + "async-lock 2.8.0", "autocfg", "cfg-if", "concurrent-queue", - "futures-lite", + "futures-lite 1.13.0", "log", "parking", - "polling", - "rustix 0.37.26", + "polling 2.8.0", + "rustix 0.37.27", "slab", "socket2 0.4.10", "waker-fn", ] [[package]] +name = "async-io" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41ed9d5715c2d329bf1b4da8d60455b99b187f27ba726df2883799af9af60997" +dependencies = [ + "async-lock 3.1.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.0.1", + "parking", + "polling 3.3.0", + "rustix 0.38.21", + "slab", + "tracing", + "waker-fn", + "windows-sys", +] + +[[package]] name = "async-lock" version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -180,35 +162,46 @@ dependencies = [ ] [[package]] +name = "async-lock" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deb2ab2aa8a746e221ab826c73f48bc6ba41be6763f0855cb249eb6d154cf1d7" +dependencies = [ + "event-listener 3.1.0", + "event-listener-strategy", + "pin-project-lite", +] + +[[package]] name = "async-process" version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea6438ba0a08d81529c69b36700fa2f95837bfe3e776ab39cde9c14d9149da88" dependencies = [ - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "async-signal", "blocking", "cfg-if", - "event-listener 3.0.0", - "futures-lite", - "rustix 0.38.20", + "event-listener 3.1.0", + "futures-lite 1.13.0", + "rustix 0.38.21", "windows-sys", ] [[package]] name = "async-signal" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2a5415b7abcdc9cd7d63d6badba5288b2ca017e3fbd4173b8f405449f1a2399" +checksum = "9e47d90f65a225c4527103a8d747001fc56e375203592b25ad103e1ca13124c5" dependencies = [ - "async-io", - "async-lock", + "async-io 2.2.0", + "async-lock 2.8.0", "atomic-waker", "cfg-if", "futures-core", "futures-io", - "rustix 0.38.20", + "rustix 0.38.21", "signal-hook-registry", "slab", "windows-sys", @@ -220,16 +213,16 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" dependencies = [ - "async-channel", + "async-channel 1.9.0", "async-global-executor", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "async-process", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite", + "futures-lite 1.13.0", "gloo-timers", "kv-log-macro", "log", @@ -260,7 +253,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -277,7 +270,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -330,15 +323,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.4" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" - -[[package]] -name = "base64ct" -version = "1.6.0" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" [[package]] name = "binascii" @@ -359,15 +346,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] -name = "blake2" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" -dependencies = [ - "digest", -] - -[[package]] name = "block-buffer" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -378,42 +356,21 @@ dependencies = [ [[package]] name = "blocking" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c36a4d0d48574b3dd360b4b7d95cc651d2b6557b6402848a27d4b228a473e2a" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" dependencies = [ - "async-channel", - "async-lock", + "async-channel 2.1.0", + "async-lock 3.1.0", "async-task", "fastrand 2.0.1", "futures-io", - "futures-lite", + "futures-lite 2.0.1", "piper", "tracing", ] [[package]] -name = "brotli" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "516074a47ef4bce09577a3b379392300159ce5b1ba2e501ff1c819950066100f" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", -] - -[[package]] -name = "brotli-decompressor" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da74e2b81409b1b743f8f0c62cc6254afefb8b8e50bbfe3735550f7aeefa3448" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", -] - -[[package]] name = "bumpalo" version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -438,46 +395,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] -name = "cached" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cead8ece0da6b744b2ad8ef9c58a4cdc7ef2921e60a6ddfb9eaaa86839b5fc5" -dependencies = [ - "ahash 0.8.3", - "async-trait", - "cached_proc_macro", - "cached_proc_macro_types", - "futures", - "hashbrown 0.14.2", - "instant", - "once_cell", - "thiserror", - "tokio", -] - -[[package]] -name = "cached_proc_macro" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8245dd5f576a41c3b76247b54c15b0e43139ceeb4f732033e15be7c005176" -dependencies = [ - "darling", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "cached_proc_macro_types" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a4f925191b4367301851c6d99b09890311d74b0d43f274c0b34c86d308a3663" - -[[package]] name = "cc" -version = "1.0.83" +version = "1.0.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "0f8e7c90afad890484a21653d08b6e209ae34770fb5ee298f9c699fcc1e5c856" dependencies = [ "libc", 
]
@@ -503,9 +424,9 @@

[[package]]
name = "chrono-tz"
-version = "0.8.3"
+version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1369bc6b9e9a7dfdae2055f6ec151fe9c554a9d23d357c0237cee2e25eaabb7"
+checksum = "e23185c0e21df6ed832a12e2bda87c7d1def6842881fb634a8511ced741b0d76"
dependencies = [
 "chrono",
 "chrono-tz-build",
@@ -514,9 +435,9 @@ dependencies = [

[[package]]
name = "chrono-tz-build"
-version = "0.2.0"
+version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2f5ebdc942f57ed96d560a6d1a459bae5851102a25d5bf89dc04ae453e31ecf"
+checksum = "433e39f13c9a060046954e0592a8d0a4bcb1040125cbf91cb8ee58964cfb350f"
dependencies = [
 "parse-zoneinfo",
 "phf",
@@ -525,11 +446,11 @@ dependencies = [

[[package]]
name = "chumsky"
-version = "0.9.2"
+version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23170228b96236b5a7299057ac284a321457700bc8c41a4476052f0f4ba5349d"
+checksum = "8eebd66744a15ded14960ab4ccdbfb51ad3b81f51f3f04a80adac98c985396c9"
dependencies = [
- "hashbrown 0.12.3",
+ "hashbrown 0.14.2",
 "stacker",
]

@@ -544,17 +465,6 @@ dependencies = [

[[package]]
name = "cookie"
-version = "0.16.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb"
-dependencies = [
- "percent-encoding",
- "time",
- "version_check",
-]
-
-[[package]]
-name = "cookie"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7efb37c3e1ccb1ff97164ad95ac1606e8ccd35b3fa0a7d99a304c7f4a428cc24"
@@ -565,40 +475,6 @@ dependencies = [
]

[[package]]
-name = "cookie_store"
-version = "0.16.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d606d0fba62e13cf04db20536c05cb7f13673c161cb47a47a82b9b9e7d3f1daa"
-dependencies = [
- "cookie 0.16.2",
- "idna 0.2.3",
- "log",
- "publicsuffix",
- "serde",
- "serde_derive",
- "serde_json",
- "time",
- "url",
-]
-
-[[package]]
-name = "cookie_store"
-version = "0.19.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d5a18f35792056f8c7c2de9c002e7e4fe44c7b5f66e7d99f46468dbb730a7ea7"
-dependencies = [
- "cookie 0.16.2",
- "idna 0.3.0",
- "log",
- "publicsuffix",
- "serde",
- "serde_derive",
- "serde_json",
- "time",
- "url",
-]
-
-[[package]]
name = "core-foundation"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -616,34 +492,14 @@ checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"

[[package]]
name = "cpufeatures"
-version = "0.2.10"
+version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fbc60abd742b35f2492f808e1abbb83d45f72db402e14c55057edc9c7b1e9e4"
+checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0"
dependencies = [
 "libc",
]

[[package]]
-name = "crc32fast"
-version = "1.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
-name = "cron"
-version = "0.12.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ff76b51e4c068c52bfd2866e1567bee7c567ae8f24ada09fd4307019e25eab7"
-dependencies = [
- "chrono",
- "nom",
- "once_cell",
-]
-
-[[package]]
name = "crossbeam-utils"
version = "0.8.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -663,41 +519,6 @@ dependencies = [
]

[[package]]
-name = "darling"
-version = "0.14.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
-dependencies = [
- "darling_core",
- "darling_macro",
-]
-
-[[package]]
-name = "darling_core"
-version = "0.14.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
-dependencies = [
- "fnv",
- "ident_case",
- "proc-macro2",
- "quote",
- "strsim",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "darling_macro"
-version = "0.14.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
-dependencies = [
- "darling_core",
- "quote",
- "syn 1.0.109",
-]
-
-[[package]]
name = "dashmap"
version = "5.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -717,12 +538,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308"

[[package]]
-name = "data-url"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41b319d1b62ffbd002e057f36bebd1f42b9f97927c9577461d855f3513c4289f"
-
-[[package]]
name = "deranged"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -761,7 +576,7 @@ dependencies = [
 "proc-macro2",
 "proc-macro2-diagnostics",
 "quote",
- "syn 2.0.38",
+ "syn",
]

[[package]]
@@ -770,18 +585,11 @@ version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2268a214a6f118fce1838edba3d1561cf0e78d8de785475957a580a7f8c69d33"
dependencies = [
- "bitflags 2.4.1",
- "byteorder",
 "chrono",
 "diesel_derives",
- "itoa",
 "libsqlite3-sys",
- "mysqlclient-sys",
- "percent-encoding",
- "pq-sys",
 "r2d2",
 "time",
- "url",
]

[[package]]
@@ -793,17 +601,7 @@ dependencies = [
 "diesel_table_macro_syntax",
 "proc-macro2",
 "quote",
- "syn 2.0.38",
-]
-
-[[package]]
-name = "diesel_logger"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23010b507517129dc9b11fb35f36d76fd2d3dd4c85232733697622e345375f2f"
-dependencies = [
- "diesel",
- "log",
+ "syn",
]

[[package]]
@@ -823,7 +621,7 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5"
dependencies = [
- "syn 2.0.38",
+ "syn",
]

[[package]]
@@ -855,7 +653,7 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbfb21b9878cf7a348dcb8559109aabc0ec40d69924bd706fa5149846c4fef75"
dependencies = [
- "base64 0.21.4",
+ "base64 0.21.5",
 "memchr",
]

@@ -864,9 +662,6 @@ name = "email_address"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2153bd83ebc09db15bcbdc3e2194d901804952e3dc96967e1cd3b0c5c32d112"
-dependencies = [
- "serde",
-]

[[package]]
name = "encoding_rs"
@@ -878,18 +673,6 @@ dependencies = [
]

[[package]]
-name = "enum-as-inner"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a"
-dependencies = [
- "heck",
- "proc-macro2",
- "quote",
- "syn 2.0.38",
-]
-
-[[package]]
name = "equivalent"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -897,24 +680,15 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"

[[package]]
name = "errno"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860"
+checksum = "7c18ee0ed65a5f1f81cac6b1d213b69c35fa47d4252ad41f1486dbd8226fe36e"
dependencies = [
 "libc",
 "windows-sys",
]

[[package]]
-name = "error-chain"
-version = "0.12.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc"
-dependencies = [
- "version_check",
-]
-
-[[package]]
name = "event-listener"
version = "2.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -922,9 +696,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"

[[package]]
name = "event-listener"
-version = "3.0.0"
+version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "29e56284f00d94c1bc7fd3c77027b4623c88c1f53d8d2394c6199f2921dea325"
+checksum = "d93877bcde0eb80ca09131a08d23f0a5c18a620b01db137dba666d18cd9b30c2"
dependencies = [
 "concurrent-queue",
 "parking",
@@ -932,6 +706,16 @@ dependencies = [
]

[[package]]
+name = "event-listener-strategy"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d96b852f1345da36d551b9473fa1e2b1eb5c5195585c6c018118bc92a8d91160"
+dependencies = [
+ "event-listener 3.1.0",
+ "pin-project-lite",
+]
+
+[[package]]
name = "fastrand"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -947,42 +731,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5"

[[package]]
-name = "fern"
-version = "0.6.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9f0c14694cbd524c8720dd69b0e3179344f04ebb5f90f2e4a440c6ea3b2f1ee"
-dependencies = [
- "libc",
- "log",
- "reopen",
- "syslog",
-]
-
-[[package]]
name = "figment"
-version = "0.10.11"
+version = "0.10.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a014ac935975a70ad13a3bff2463b1c1b083b35ae4cb6309cfc59476aa7a181f"
+checksum = "649f3e5d826594057e9a519626304d8da859ea8a0b18ce99500c586b8d45faee"
dependencies = [
 "atomic 0.6.0",
 "pear",
 "serde",
- "toml 0.8.2",
+ "toml 0.8.8",
 "uncased",
 "version_check",
]

[[package]]
-name = "flate2"
-version = "1.0.28"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e"
-dependencies = [
- "crc32fast",
- "miniz_oxide",
-]
-
-[[package]]
name = "fnv"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1014,9 +776,9 @@ dependencies = [

[[package]]
name = "futures"
-version = "0.3.28"
+version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40"
+checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335"
dependencies = [
 "futures-channel",
 "futures-core",
@@ -1029,9 +791,9 @@ dependencies = [

[[package]]
name = "futures-channel"
-version = "0.3.28"
+version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2"
+checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb"
dependencies = [
 "futures-core",
 "futures-sink",
@@ -1039,15 +801,15 @@ dependencies = [

[[package]]
name = "futures-core"
-version = "0.3.28"
+version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c"
+checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c"

[[package]]
name = "futures-executor"
-version = "0.3.28"
+version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0"
+checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc"
dependencies = [
 "futures-core",
 "futures-task",
@@ -1056,9 +818,9 @@ dependencies = [

[[package]]
name = "futures-io"
-version = "0.3.28"
+version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964"
+checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa"

[[package]]
name = "futures-lite"
@@ -1076,27 +838,37 @@ dependencies = [
]

[[package]]
+name = "futures-lite"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3831c2651acb5177cbd83943f3d9c8912c5ad03c76afcc0e9511ba568ec5ebb"
+dependencies = [
+ "futures-core",
+ "pin-project-lite",
+]
+
+[[package]]
name = "futures-macro"
-version = "0.3.28"
+version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
+checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.38",
+ "syn",
]

[[package]]
name = "futures-sink"
-version = "0.3.28"
+version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e"
+checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817"

[[package]]
name = "futures-task"
-version = "0.3.28"
+version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65"
+checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2"

[[package]]
name = "futures-timer"
@@ -1106,9 +878,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c"

[[package]]
name = "futures-util"
-version = "0.3.28"
+version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533"
+checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104"
dependencies = [
 "futures-channel",
 "futures-core",
@@ -1147,9 +919,9 @@ dependencies = [

[[package]]
name = "getrandom"
-version = "0.2.10"
+version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
+checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f"
dependencies = [
 "cfg-if",
 "libc",
@@ -1193,31 +965,10 @@ dependencies = [
 "no-std-compat",
 "nonzero_ext",
 "parking_lot",
- "quanta",
- "rand",
 "smallvec",
]

[[package]]
-name = "h2"
-version = "0.3.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833"
-dependencies = [
- "bytes",
- "fnv",
- "futures-core",
- "futures-sink",
- "futures-util",
- "http",
- "indexmap 1.9.3",
- "slab",
- "tokio",
- "tokio-util",
- "tracing",
-]
-
-[[package]]
name = "half"
version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1225,9 +976,9 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"

[[package]]
name = "handlebars"
-version = "4.4.0"
+version = "4.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c39b3bc2a8f715298032cf5087e58573809374b08160aa7d750582bdb82d2683"
+checksum = "faa67bab9ff362228eb3d00bd024a4965d8231bbb7921167f0cfa66c6626b225"
dependencies = [
 "log",
 "pest",
@@ -1243,9 +994,6 @@ name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
-dependencies = [
- "ahash 0.7.6",
-]

[[package]]
name = "hashbrown"
@@ -1253,17 +1001,11 @@ version = "0.14.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156"
dependencies = [
- "ahash 0.8.3",
+ "ahash",
 "allocator-api2",
]

[[package]]
-name = "heck"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
-
-[[package]]
name = "hermit-abi"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1288,30 +1030,10 @@ dependencies = [
]

[[package]]
-name = "hostname"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867"
-dependencies = [
- "libc",
- "match_cfg",
- "winapi",
-]
-
-[[package]]
-name = "html5gum"
-version = "0.5.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4c4e556171a058ba117bbe88b059fb37b6289023e007d2903ea6dca3a3cbff14"
-dependencies = [
- "jetscii",
-]
-
-[[package]]
name = "http"
-version = "0.2.9"
+version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482"
+checksum = "f95b9abcae896730d42b78e09c155ed4ddf82c07b4de772c64aee5b2d8b7c150"
dependencies = [
 "bytes",
 "fnv",
@@ -1351,7 +1073,6 @@ dependencies = [
 "futures-channel",
 "futures-core",
 "futures-util",
- "h2",
 "http",
 "http-body",
 "httparse",
@@ -1366,19 +1087,6 @@ dependencies = [
]

[[package]]
-name = "hyper-tls"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
-dependencies = [
- "bytes",
- "hyper",
- "native-tls",
- "tokio",
- "tokio-native-tls",
-]
-
-[[package]]
name = "iana-time-zone"
version = "0.1.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1402,33 +1110,6 @@ dependencies = [
]

[[package]]
-name = "ident_case"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
-
-[[package]]
-name = "idna"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8"
-dependencies = [
- "matches",
- "unicode-bidi",
- "unicode-normalization",
-]
-
-[[package]]
-name = "idna"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6"
-dependencies = [
- "unicode-bidi",
- "unicode-normalization",
-]
-
-[[package]]
name = "idna"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1451,9 +1132,9 @@ dependencies = [

[[package]]
name = "indexmap"
-version = "2.0.2"
+version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897"
+checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f"
dependencies = [
 "equivalent",
 "hashbrown 0.14.2",
@@ -1486,31 +1167,13 @@ dependencies = [
]

[[package]]
-name = "ipconfig"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f"
-dependencies = [
- "socket2 0.5.5",
- "widestring",
- "windows-sys",
- "winreg",
-]
-
-[[package]]
-name = "ipnet"
-version = "2.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6"
-
-[[package]]
name = "is-terminal"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
dependencies = [
 "hermit-abi",
- "rustix 0.38.20",
+ "rustix 0.38.21",
 "windows-sys",
]

@@ -1521,40 +1184,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"

[[package]]
-name = "jetscii"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "47f142fe24a9c9944451e8349de0a56af5f3e7226dc46f3ed4d4ecc0b85af75e"
-
-[[package]]
-name = "job_scheduler_ng"
-version = "2.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10bbdf445513bbe53f4666218b7057d265c76fa0b30475e121a6bf05dbaacaae"
-dependencies = [
- "chrono",
- "cron",
- "uuid",
-]
-
-[[package]]
name = "js-sys"
-version = "0.3.64"
+version = "0.3.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a"
+checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8"
dependencies = [
 "wasm-bindgen",
]

[[package]]
name = "jsonwebtoken"
-version = "9.0.0"
+version = "9.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e863f95209c79b9b8b001c4b03463385f890a765dbc4e0802cb8d4177e3e410"
+checksum = "155c4d7e39ad04c172c5e3a99c434ea3b4a7ba7960b38ecd562b270b097cce09"
dependencies = [
- "base64 0.21.4",
+ "base64 0.21.5",
 "pem",
- "ring 0.17.5",
+ "ring",
 "serde",
 "serde_json",
 "simple_asn1",
@@ -1577,50 +1223,37 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "lettre"
-version = "0.11.0"
+version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d47084ad58f99c26816d174702f60e873f861fcef3f9bd6075b4ad2dd72d07d5"
+checksum = "a466bc111374ccf4d90877dba636924a2185e67e5be4b35d32043199365097b2"
dependencies = [
 "async-std",
 "async-trait",
- "base64 0.21.4",
+ "base64 0.21.5",
 "chumsky",
 "email-encoding",
 "email_address",
 "fastrand 2.0.1",
 "futures-io",
 "futures-util",
- "hostname",
 "httpdate",
- "idna 0.4.0",
+ "idna",
 "mime",
 "native-tls",
 "nom",
 "once_cell",
 "quoted_printable",
- "serde",
 "socket2 0.5.5",
 "tokio",
 "tokio-native-tls",
- "tracing",
 "url",
]

[[package]]
name = "libc"
-version = "0.2.149"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b"
-
-[[package]]
-name = "libmimalloc-sys"
-version = "0.1.35"
+version = "0.2.150"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3979b5c37ece694f1f5e51e7ecc871fdb0f517ed04ee45f88d15d6d553cb9664"
-dependencies = [
- "cc",
- "libc",
-]
+checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c"

[[package]]
name = "libsqlite3-sys"
@@ -1634,12 +1267,6 @@ dependencies = [
]

[[package]]
-name = "linked-hash-map"
-version = "0.5.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
-
-[[package]]
name = "linux-raw-sys"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1647,9 +1274,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"

[[package]]
name = "linux-raw-sys"
-version = "0.4.10"
+version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f"
+checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829"

[[package]]
name = "lock_api"
@@ -1686,30 +1313,6 @@ dependencies = [
]

[[package]]
-name = "lru-cache"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c"
-dependencies = [
- "linked-hash-map",
-]
-
-[[package]]
-name = "mach2"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "match_cfg"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4"
-
-[[package]]
name = "matchers"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1719,12 +1322,6 @@ dependencies = [
]

[[package]]
-name = "matches"
-version = "0.1.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"
-
-[[package]]
name = "memchr"
version = "2.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1752,15 +1349,6 @@ dependencies = [
]

[[package]]
-name = "mimalloc"
-version = "0.1.39"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa01922b5ea280a911e323e4d2fd24b7fe5cc4042e0d2cda3c40775cdc4bdc9c"
-dependencies = [
- "libmimalloc-sys",
-]
-
-[[package]]
name = "mime"
version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1783,9 +1371,9 @@ dependencies = [

[[package]]
name = "mio"
-version = "0.8.8"
+version = "0.8.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2"
+checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0"
dependencies = [
 "libc",
 "wasi",
@@ -1806,23 +1394,13 @@ dependencies = [
 "log",
 "memchr",
 "mime",
- "spin 0.9.8",
+ "spin",
 "tokio",
 "tokio-util",
 "version_check",
]

[[package]]
-name = "mysqlclient-sys"
-version = "0.2.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f61b381528ba293005c42a409dd73d034508e273bf90481f17ec2e964a6e969b"
-dependencies = [
- "pkg-config",
- "vcpkg",
-]
-
-[[package]]
name = "native-tls"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1891,7 +1469,7 @@ checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.38",
+ "syn",
]

[[package]]
@@ -1924,15 +1502,6 @@ dependencies = [
]

[[package]]
-name = "num_threads"
-version = "0.1.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44"
-dependencies = [
- "libc",
-]
-
-[[package]]
name = "object"
version = "0.32.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -1949,9 +1518,9 @@ checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"

[[package]]
name = "openssl"
-version = "0.10.57"
+version = "0.10.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c"
+checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33"
dependencies = [
 "bitflags 2.4.1",
 "cfg-if",
@@ -1970,7 +1539,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.38",
+ "syn",
]

[[package]]
@@ -1980,23 +1549,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"

[[package]]
-name = "openssl-src"
-version = "111.28.0+1.1.1w"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ce95ee1f6f999dfb95b8afd43ebe442758ea2104d1ccb99a94c30db22ae701f"
-dependencies = [
- "cc",
-]
-
-[[package]]
name = "openssl-sys"
-version = "0.9.92"
+version = "0.9.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db7e971c2c2bba161b2d2fdf37080177eff520b3bc044787c7f1f5f9e78d869b"
+checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9"
dependencies = [
 "cc",
 "libc",
- "openssl-src",
 "pkg-config",
 "vcpkg",
]
@@ -2031,7 +1590,7 @@ checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e"
dependencies = [
 "cfg-if",
 "libc",
- "redox_syscall 0.4.1",
+ "redox_syscall",
 "smallvec",
 "windows-targets",
]
@@ -2046,17 +1605,6 @@ dependencies = [
]

[[package]]
-name = "password-hash"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166"
-dependencies = [
- "base64ct",
- "rand_core",
- "subtle",
-]
-
-[[package]]
name = "paste"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2082,7 +1630,7 @@ dependencies = [
 "proc-macro2",
 "proc-macro2-diagnostics",
 "quote",
- "syn 2.0.38",
+ "syn",
]

[[package]]
@@ -2091,7 +1639,7 @@ version = "3.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923"
dependencies = [
- "base64 0.21.4",
+ "base64 0.21.5",
 "serde",
]

@@ -2103,9 +1651,9 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"

[[package]]
name = "pest"
-version = "2.7.4"
+version = "2.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c022f1e7b65d6a24c0dbbd5fb344c66881bc01f3e5ae74a1c8100f2f985d98a4"
+checksum = "ae9cee2a55a544be8b89dc6848072af97a20f2422603c10865be2a42b580fff5"
dependencies = [
 "memchr",
 "thiserror",
@@ -2114,9 +1662,9 @@ dependencies = [

[[package]]
name = "pest_derive"
-version = "2.7.4"
+version = "2.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35513f630d46400a977c4cb58f78e1bfbe01434316e60c37d27b9ad6139c66d8"
+checksum = "81d78524685f5ef2a3b3bd1cafbc9fcabb036253d9b1463e726a91cd16e2dfc2"
dependencies = [
 "pest",
 "pest_generator",
@@ -2124,22 +1672,22 @@ dependencies = [

[[package]]
name = "pest_generator"
-version = "2.7.4"
+version = "2.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc9fc1b9e7057baba189b5c626e2d6f40681ae5b6eb064dc7c7834101ec8123a"
+checksum = "68bd1206e71118b5356dae5ddc61c8b11e28b09ef6a31acbd15ea48a28e0c227"
dependencies = [
 "pest",
 "pest_meta",
 "proc-macro2",
 "quote",
- "syn 2.0.38",
+ "syn",
]

[[package]]
name = "pest_meta"
-version = "2.7.4"
+version = "2.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1df74e9e7ec4053ceb980e7c0c8bd3594e977fde1af91daba9c928e8e8c6708d"
+checksum = "7c747191d4ad9e4a4ab9c8798f1e82a39affe7ef9648390b7e5548d18e099de6"
dependencies = [
 "once_cell",
 "pest",
@@ -2185,12 +1733,6 @@ dependencies = [
]

[[package]]
-name = "pico-args"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315"
-
-[[package]]
name = "pin-project-lite"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2236,6 +1778,20 @@ dependencies = [
]

[[package]]
+name = "polling"
+version = "3.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e53b6af1f60f36f8c2ac2aad5459d75a5a9b4be1e8cdd40264f315d78193e531"
+dependencies = [
+ "cfg-if",
+ "concurrent-queue",
+ "pin-project-lite",
+ "rustix 0.38.21",
+ "tracing",
+ "windows-sys",
+]
+
+[[package]]
name = "powerfmt"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2248,12 +1804,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"

[[package]]
-name = "pq-sys"
-version = "0.4.8"
+name = "priv_sep"
+version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "31c0052426df997c0cbd30789eb44ca097e3541717a7b8fa36b1c464ee7edebd"
+checksum = "6d7312b9a65def240127a112f9e5c53ac9fa104d2abfb391355d5655ee7621c4"
dependencies = [
- "vcpkg",
+ "libc",
+ "rustc_version",
]

[[package]]
@@ -2273,18 +1830,12 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.38",
+ "syn",
 "version_check",
 "yansi 1.0.0-rc.1",
]

[[package]]
-name = "psl-types"
-version = "2.0.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "33cb294fe86a74cbcf50d4445b37da762029549ebeea341421c7c70370f86cac"
-
-[[package]]
name = "psm"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -2294,38 +1845,6 @@ dependencies = [
]

[[package]]
-name = "publicsuffix"
-version = "2.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96a8c1bda5ae1af7f99a2962e49df150414a43d62404644d98dd5c3a93d07457"
-dependencies = [
- "idna 0.3.0",
- "psl-types",
-]
-
-[[package]]
-name = "quanta"
-version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" -dependencies = [ - "crossbeam-utils", - "libc", - "mach2", - "once_cell", - "raw-cpuid", - "wasi", - "web-sys", - "winapi", -] - -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - -[[package]] name = "quote" version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2368,35 +1887,17 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "raw-cpuid" -version = "10.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" -dependencies = [ - "bitflags 1.3.2", + "ppv-lite86", + "rand_core", ] [[package]] -name = "redox_syscall" -version = "0.3.5" +name = "rand_core" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "bitflags 1.3.2", + "getrandom", ] [[package]] @@ -2425,7 +1926,7 @@ checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -2473,87 +1974,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] -name = "reopen" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff42cec3acf85845f5b18b3cbb7fec619ccbd4a349f6ecbe1c62ab46d4d98293" -dependencies = [ - "autocfg", - "libc", - "signal-hook", -] - -[[package]] -name = "reqwest" -version = "0.11.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" -dependencies = [ - "async-compression", - "base64 0.21.4", - "bytes", - "cookie 0.16.2", - "cookie_store 0.16.2", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-tls", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "serde", - "serde_json", - "serde_urlencoded", - "system-configuration", - "tokio", - "tokio-native-tls", - "tokio-socks", - "tokio-util", - "tower-service", - "trust-dns-resolver", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-streams", - "web-sys", - "winreg", -] - -[[package]] -name = "resolv-conf" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" -dependencies = [ - "hostname", - "quick-error", -] - -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin 0.5.2", - "untrusted 0.7.1", - "web-sys", - "winapi", -] - -[[package]] name = "ring" version = "0.17.5" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2562,8 +1982,8 @@ dependencies = [ "cc", "getrandom", "libc", - "spin 0.9.8", - "untrusted 0.9.0", + "spin", + "untrusted", "windows-sys", ] @@ -2637,7 +2057,7 @@ dependencies = [ "proc-macro2", "quote", "rocket_http", - "syn 2.0.38", + "syn", "unicode-xid", ] @@ -2646,7 +2066,7 @@ name = "rocket_http" version = "0.5.0-rc.3" source = "git+https://github.com/SergioBenitez/Rocket?rev=ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa#ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa" dependencies = [ - "cookie 0.17.0", + "cookie", "either", "futures", "http", @@ -2680,37 +2100,25 @@ dependencies = [ ] [[package]] -name = "rpassword" -version = "7.2.0" +name = "rustc-demangle" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6678cf63ab3491898c0d021b493c94c9b221d91295294a2a5746eacbe5928322" -dependencies = [ - "libc", - "rtoolbox", - "winapi", -] +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] -name = "rtoolbox" -version = "0.0.1" +name = "rustc_version" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "034e22c514f5c0cb8a10ff341b9b048b5ceb21591f31c8f44c43b960f9b3524a" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "libc", - "winapi", + "semver", ] [[package]] -name = "rustc-demangle" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" - -[[package]] name = "rustix" -version = "0.37.26" +version = "0.37.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f3f8f960ed3b5a59055428714943298bf3fa2d4a1d53135084e0544829d995" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" dependencies = [ "bitflags 1.3.2", "errno", @@ -2722,46 +2130,46 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.20" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ce50cb2e16c2903e30d1cbccfd8387a74b9d4c938b6a4c5ec6cc7556f7a8a0" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ "bitflags 2.4.1", "errno", "libc", - "linux-raw-sys 0.4.10", + "linux-raw-sys 0.4.11", "windows-sys", ] [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" dependencies = [ "log", - "ring 0.16.20", + "ring", "rustls-webpki", "sct", ] [[package]] name = "rustls-pemfile" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", ] [[package]] name = "rustls-webpki" -version = "0.101.6" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", + "ring", + "untrusted", ] [[package]] @@ -2817,12 +2225,12 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", + "ring", + "untrusted", ] [[package]] @@ -2856,9 +2264,9 @@ checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" [[package]] name = "serde" -version = "1.0.189" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" dependencies = [ "serde_derive", ] @@ -2875,20 +2283,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ "itoa", "ryu", @@ -2897,37 +2305,14 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.1" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" dependencies = [ - "form_urlencoded", - "itoa", - "ryu", "serde", ] [[package]] -name = "sha-1" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", -] - -[[package]] name = "sha1" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2959,16 +2344,6 @@ dependencies = [ ] [[package]] -name = "signal-hook" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" -dependencies = [ - "libc", - "signal-hook-registry", -] - -[[package]] name = "signal-hook-registry" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3006,9 +2381,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.1" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" +checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" [[package]] name = "socket2" @@ -3032,12 +2407,6 @@ dependencies = [ [[package]] name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" @@ -3074,12 +2443,6 @@ dependencies = [ ] [[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] name = "subtle" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3087,20 +2450,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.38" +version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", @@ -3108,49 +2460,15 @@ dependencies = [ ] [[package]] -name = "syslog" -version = "6.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7434e95bcccce1215d30f4bf84fe8c00e8de1b9be4fb736d747ca53d36e7f96f" -dependencies = [ - "error-chain", - "hostname", - "libc", - "log", - "time", -] - -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", "fastrand 2.0.1", - "redox_syscall 0.3.5", - "rustix 0.38.20", + "redox_syscall", + "rustix 0.38.21", "windows-sys", ] @@ -3171,7 +2489,7 @@ checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -3185,15 +2503,6 @@ dependencies = [ ] [[package]] -name = "threadpool" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" -dependencies = [ - "num_cpus", -] - -[[package]] name = "time" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3201,8 +2510,6 
@@ checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" dependencies = [ "deranged", "itoa", - "libc", - "num_threads", "powerfmt", "serde", "time-core", @@ -3241,16 +2548,15 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.33.0" +version = "1.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" +checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" dependencies = [ "backtrace", "bytes", "libc", "mio", "num_cpus", - "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2 0.5.5", @@ -3260,13 +2566,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -3290,18 +2596,6 @@ dependencies = [ ] [[package]] -name = "tokio-socks" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51165dfa029d2a65969413a6cc96f354b86b464498702f174a4efa13608fd8c0" -dependencies = [ - "either", - "futures-util", - "thiserror", - "tokio", -] - -[[package]] name = "tokio-stream" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3326,16 +2620,15 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] @@ -3352,21 +2645,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.2" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" +checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.20.2", + "toml_edit 0.21.0", ] [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] @@ -3377,7 +2670,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "serde", "serde_spanned", "toml_datetime", @@ -3386,11 +2679,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.20.2" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "serde", "serde_spanned", "toml_datetime", @@ -3399,13 +2692,13 @@ dependencies = [ [[package]] name 
= "totp-lite" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc496875d9c8fe9a0ce19e3ee8e8808c60376831a439543f0aac71c9dd129fa" +checksum = "f8e43134db17199f7f721803383ac5854edd0d3d523cc34dba321d6acfbe76c3" dependencies = [ "digest", "hmac", - "sha-1", + "sha1", "sha2", ] @@ -3421,7 +2714,6 @@ version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -3435,7 +2727,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", ] [[package]] @@ -3450,12 +2742,12 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" dependencies = [ - "lazy_static", "log", + "once_cell", "tracing-core", ] @@ -3478,52 +2770,6 @@ dependencies = [ ] [[package]] -name = "trust-dns-proto" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "559ac980345f7f5020883dd3bcacf176355225e01916f8c2efecad7534f682c6" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.4.0", - "ipnet", - "once_cell", - "rand", - "smallvec", - "thiserror", - "tinyvec", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c723b0e608b24ad04c73b2607e0241b2c98fd79795a95e98b068b6966138a29d" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lru-cache", - "once_cell", - "parking_lot", - "rand", - "resolv-conf", - "smallvec", - "thiserror", - "tokio", - "tracing", - "trust-dns-proto", -] - -[[package]] name = "try-lock" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -3608,12 +2854,6 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - -[[package]] -name = "untrusted" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" @@ -3625,7 +2865,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna", "percent-encoding", "serde", ] @@ -3659,54 +2899,39 @@ checksum = "4a72e1902dde2bd6441347de2b70b7f5d59bf157c6c62f0c44572607a1d55bbe" [[package]] name = "vaultwarden" -version = "1.0.0" +version = "1.30.0" dependencies = [ - "argon2", "bytes", - "cached", "chrono", "chrono-tz", - "cookie 0.16.2", - "cookie_store 0.19.1", "dashmap", "data-encoding", - "data-url", "diesel", - "diesel_logger", "diesel_migrations", "dotenvy", "email_address", - "fern", "futures", "governor", "handlebars", - "html5gum", - "job_scheduler_ng", "jsonwebtoken", "lettre", "libsqlite3-sys", - "log", - 
"mimalloc", "num-derive", "num-traits", "once_cell", "openssl", - "openssl-sys", "paste", "percent-encoding", - "pico-args", + "priv_sep", "rand", "regex", - "reqwest", - "ring 0.17.5", + "ring", "rmpv", "rocket", "rocket_ws", - "rpassword", "semver", "serde", "serde_json", - "syslog", "time", "tokio", "tokio-tungstenite", @@ -3716,7 +2941,6 @@ dependencies = [ "uuid", "webauthn-rs", "which", - "yubico", ] [[package]] @@ -3764,9 +2988,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3774,24 +2998,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "9afec9963e3d0994cac82455b2b3502b81a7f40f9a0d32181f7528d9f4b43e02" dependencies = [ "cfg-if", "js-sys", @@ -3801,9 +3025,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3811,41 +3035,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" - -[[package]] -name = "wasm-streams" -version = "0.3.0" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4609d447824375f43e1ffbc051b50ad8f4b3ae8219680c94452ea05eb240ac7" -dependencies = [ - "futures-util", - "js-sys", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] +checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85" dependencies = [ "js-sys", "wasm-bindgen", @@ -3879,17 +3090,11 @@ dependencies = [ "either", "home", 
"once_cell", - "rustix 0.38.20", + "rustix 0.38.21", "windows-sys", ] [[package]] -name = "widestring" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" - -[[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4006,24 +3211,14 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.17" +version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3b801d0e0a6726477cc207f60162da452f3a95adb368399bef20a946e06f65c" +checksum = "829846f3e3db426d4cee4510841b71a8e58aa2a76b1132579487ae430ccd9c7b" dependencies = [ "memchr", ] [[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if", - "windows-sys", -] - -[[package]] name = "yansi" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -4036,17 +3231,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1367295b8f788d371ce2dbc842c7b709c73ee1364d30351dd300ec2203b12377" [[package]] -name = "yubico" -version = "0.11.0" +name = "zerocopy" +version = "0.7.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "173f75d2c4010429a2d74ae3a114a69930c59e2b1a4c97b1c75d259a4960d5fb" +checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557" dependencies = [ - "base64 0.13.1", - "form_urlencoded", - "futures", - "hmac", - "rand", - "reqwest", - "sha1", - "threadpool", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] diff --git a/Cargo.toml b/Cargo.toml @@ -1,189 +1,69 @@ [package] -name = "vaultwarden" -version = "1.0.0" authors = ["Daniel García <dani-garcia@users.noreply.github.com>"] edition = "2021" -rust-version = "1.71.1" -resolver = "2" - -repository = "https://github.com/dani-garcia/vaultwarden" -readme = "README.md" license = "AGPL-3.0-only" +name = "vaultwarden" publish = false -build = "build.rs" +readme = "README.md" +repository = "https://github.com/dani-garcia/vaultwarden" +resolver = "2" +version = "1.30.0" [features] -# default = ["sqlite"] -# Empty to keep compatibility, prefer to set USE_SYSLOG=true -enable_syslog = [] -mysql = ["diesel/mysql", "diesel_migrations/mysql"] -postgresql = ["diesel/postgres", "diesel_migrations/postgres"] -sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "libsqlite3-sys"] -# Enable to use a vendored and statically linked openssl -vendored_openssl = ["openssl/vendored"] -# Enable MiMalloc memory allocator to replace the default malloc -# This can improve performance for Alpine builds -enable_mimalloc = ["mimalloc"] -# This is a development dependency, and should only be used during development! -# It enables the usage of the diesel_logger crate, which is able to output the generated queries. -# You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile -# if you want to turn off the logging for a specific run. 
-query_logger = ["diesel_logger"] - -# Enable unstable features, requires nightly -# Currently only used to enable rusts official ip support -unstable = [] +priv_sep = ["dep:priv_sep"] -[target."cfg(not(windows))".dependencies] -# Logging -syslog = "6.1.0" +[target.'cfg(target_os = "openbsd")'.dependencies] +priv_sep = { version = "0.8.1", default-features = false, features = ["openbsd"], optional = true } [dependencies] -# Logging -log = "0.4.20" -fern = { version = "0.6.2", features = ["syslog-6", "reopen-1"] } -tracing = { version = "0.1.40", features = ["log"] } # Needed to have lettre and webauthn-rs trace logging to work - -# A `dotenv` implementation for Rust +bytes = { version = "1.5.0", default-features = false } +chrono = { version = "0.4.31", default-features = false, features = ["serde"] } +chrono-tz = { version = "0.8.3", default-features = false } +dashmap = { version = "5.5.3", default-features = false } +data-encoding = { version = "2.4.0", default-features = false } +diesel = { version = "2.1.3", default-features = false, features = ["32-column-tables", "chrono", "r2d2", "sqlite"] } +diesel_migrations = { version = "2.1.0", default-features = false } dotenvy = { version = "0.15.7", default-features = false } - -# Lazy initialization -once_cell = "1.18.0" - -# Numerical libraries -num-traits = "0.2.17" -num-derive = "0.4.1" - -# Web framework -rocket = { version = "0.5.0-rc.3", features = ["tls", "json"], default-features = false } -# rocket_ws = { version ="0.1.0-rc.3" } -rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = "ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa" } # v0.5 branch - -# WebSockets libraries -tokio-tungstenite = "0.19.0" -rmpv = "1.0.1" # MessagePack library - -# Concurrent HashMap used for WebSocket messaging and favicons -dashmap = "5.5.3" - -# Async futures -futures = "0.3.28" -tokio = { version = "1.33.0", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal"] } - -# A generic serialization/deserialization framework -serde = { version = "1.0.189", features = ["derive"] } -serde_json = "1.0.107" - -# A safe, extensible ORM and Query builder -diesel = { version = "2.1.3", features = ["chrono", "r2d2"] } -diesel_migrations = "2.1.0" -diesel_logger = { version = "0.3.0", optional = true } - -# Bundled/Static SQLite -libsqlite3-sys = { version = "0.26.0", features = ["bundled"], optional = true } - -# Crypto-related libraries -rand = { version = "0.8.5", features = ["small_rng"] } -ring = "0.17.5" - -# UUID generation -uuid = { version = "1.5.0", features = ["v4"] } - -# Date and time libraries -chrono = { version = "0.4.31", features = ["clock", "serde"], default-features = false } -chrono-tz = "0.8.3" -time = "0.3.30" - -# Job scheduler -job_scheduler_ng = "2.0.4" - -# Data encoding library Hex/Base32/Base64 -data-encoding = "2.4.0" - -# JWT library -jsonwebtoken = "9.0.0" - -# TOTP library -totp-lite = "2.0.0" - -# Yubico Library -yubico = { version = "0.11.0", features = ["online-tokio"], default-features = false } - -# WebAuthn libraries -webauthn-rs = "0.3.2" - -# Handling of URL's for WebAuthn and favicons -url = "2.4.1" - -# Email libraries -lettre = { version = "0.11.0", features = ["smtp-transport", "sendmail-transport", "builder", "serde", "tokio1-native-tls", "hostname", "tracing", "tokio1"], default-features = false } -percent-encoding = "2.3.0" # URL encoding library used for URL's in the emails -email_address = "0.2.4" - -# HTML Template library -handlebars = { version = "4.4.0", features = 
["dir_source"] } - -# HTTP client (Used for favicons, version check, DUO and HIBP API) -reqwest = { version = "0.11.22", features = ["stream", "json", "deflate", "gzip", "brotli", "socks", "cookies", "trust-dns", "native-tls-alpn"] } - -# Favicon extraction libraries -html5gum = "0.5.7" -regex = { version = "1.10.2", features = ["std", "perf", "unicode-perl"], default-features = false } -data-url = "0.3.0" -bytes = "1.5.0" - -# Cache function results (Used for version check and favicon fetching) -cached = { version = "0.46.0", features = ["async"] } - -# Used for custom short lived cookie jar during favicon extraction -cookie = "0.16.2" -cookie_store = "0.19.1" - -# Used by U2F, JWT and PostgreSQL -openssl = "0.10.57" -# Set openssl-sys fixed to v0.9.92 to prevent building issues with musl, arm and 32bit pointer width -# It will force add a dynamically linked library which prevents the build from being static -openssl-sys = "=0.9.92" - -# CLI argument parsing -pico-args = "0.5.0" - -# Macro ident concatenation -paste = "1.0.14" -governor = "0.6.0" - -# Check client versions for specific features. -semver = "1.0.20" - -# Allow overriding the default memory allocator -# Mainly used for the musl builds, since the default musl malloc is very slow -mimalloc = { version = "0.1.39", features = ["secure"], default-features = false, optional = true } -which = "5.0.0" - -# Argon2 library with support for the PHC format -argon2 = "0.5.2" - -# Reading a password from the cli for generating the Argon2id ADMIN_TOKEN -rpassword = "7.2.0" - +email_address = { version = "0.2.4", default-features = false } +futures = { version = "0.3.28", default-features = false } +governor = { version = "0.6.0", default-features = false, features = ["dashmap", "std"] } +handlebars = { version = "4.4.0", default-features = false, features = ["dir_source"] } +jsonwebtoken = { version = "9.0.0", default-features = false, features = ["use_pem"] } +libsqlite3-sys = { version = "0.26.0", default-features = false, features = ["bundled"] } +lettre = { version = "0.11.0", default-features = false, features = ["builder", "sendmail-transport", "smtp-transport", "tokio1-native-tls"] } +num-derive = { version = "0.4.1", default-features = false } +num-traits = { version = "0.2.17", default-features = false } +once_cell = {version = "1.18.0", default-features = false } +openssl = { version = "0.10.59", default-features = false } +paste = { version = "1.0.14", default-features = false } +percent-encoding = { version = "2.3.0", default-features = false } +rand = { version = "0.8.5", default-features = false, features = ["small_rng"] } +regex = { version = "1.10.2", default-features = false, features = ["std"] } +ring = { version = "0.17.5", default-features = false } +rmpv = { version = "1.0.1", default-features = false } +rocket = { version = "0.5.0-rc.3", default-features = false, features = ["json", "tls"] } +rocket_ws = { git = "https://github.com/SergioBenitez/Rocket", rev = "ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa", default-features = false, features = ["tokio-tungstenite"] } +semver = { version = "1.0.20", default-features = false } +serde = { version = "1.0.189", default-features = false } +serde_json = { version = "1.0.107", default-features = false } +time = { version = "0.3.30", default-features = false } +tokio = { version = "1.33.0", default-features = false } +tokio-tungstenite = { version = "0.19.0", default-features = false } +totp-lite = { version = "2.0.0", default-features = false } +tracing = { version = "0.1.40", 
default-features = false } +url = { version = "2.4.1", default-features = false } +uuid = { version = "1.5.0", default-features = false, features = ["v4"] } +webauthn-rs = { version = "0.3.2", default-features = false, features = ["core"] } +which = { version = "5.0.0", default-features = false } [patch.crates-io] -rocket = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch -# rocket_ws = { git = 'https://github.com/SergioBenitez/Rocket', rev = 'ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa' } # v0.5 branch - - -# Strip debuginfo from the release builds -# Also enable thin LTO for some optimizations -[profile.release] -strip = "debuginfo" -lto = "thin" - +rocket = { git = "https://github.com/SergioBenitez/Rocket", rev = "ce441b5f46fdf5cd99cb32b8b8638835e4c2a5fa" } # A little bit of a speedup [profile.dev] split-debuginfo = "unpacked" -# Always build argon2 using opt-level 3 -# This is a huge speed improvement during testing -[profile.dev.package.argon2] -opt-level = 3 +[profile.release] +lto = true +panic = 'abort' +strip = true diff --git a/Dockerfile b/Dockerfile @@ -1 +0,0 @@ -docker/Dockerfile.debian -\ No newline at end of file diff --git a/README.md b/README.md @@ -1,95 +0,0 @@ -### Alternative implementation of the Bitwarden server API written in Rust and compatible with [upstream Bitwarden clients](https://bitwarden.com/download/)*, perfect for self-hosted deployment where running the official resource-heavy service might not be ideal. - -📢 Note: This project was known as Bitwarden_RS and has been renamed to separate itself from the official Bitwarden server in the hopes of avoiding confusion and trademark/branding issues. Please see [#1642](https://github.com/dani-garcia/vaultwarden/discussions/1642) for more explanation. - ---- -[![Build](https://github.com/dani-garcia/vaultwarden/actions/workflows/build.yml/badge.svg)](https://github.com/dani-garcia/vaultwarden/actions/workflows/build.yml) -[![ghcr.io](https://img.shields.io/badge/ghcr.io-download-blue)](https://github.com/dani-garcia/vaultwarden/pkgs/container/vaultwarden) -[![Docker Pulls](https://img.shields.io/docker/pulls/vaultwarden/server.svg)](https://hub.docker.com/r/vaultwarden/server) -[![Quay.io](https://img.shields.io/badge/Quay.io-download-blue)](https://quay.io/repository/vaultwarden/server) -[![Dependency Status](https://deps.rs/repo/github/dani-garcia/vaultwarden/status.svg)](https://deps.rs/repo/github/dani-garcia/vaultwarden) -[![GitHub Release](https://img.shields.io/github/release/dani-garcia/vaultwarden.svg)](https://github.com/dani-garcia/vaultwarden/releases/latest) -[![AGPL-3.0 Licensed](https://img.shields.io/github/license/dani-garcia/vaultwarden.svg)](https://github.com/dani-garcia/vaultwarden/blob/main/LICENSE.txt) -[![Matrix Chat](https://img.shields.io/matrix/vaultwarden:matrix.org.svg?logo=matrix)](https://matrix.to/#/#vaultwarden:matrix.org) - -Image is based on [Rust implementation of Bitwarden API](https://github.com/dani-garcia/vaultwarden). - -**This project is not associated with the [Bitwarden](https://bitwarden.com/) project nor Bitwarden, Inc.** - -#### ⚠️**IMPORTANT**⚠️: When using this server, please report any bugs or suggestions to us directly (look at the bottom of this page for ways to get in touch), regardless of whatever clients you are using (mobile, desktop, browser...). DO NOT use the official support channels. 
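The Cargo.toml changes above are the heart of this commit: the only new dependency is an optional, OpenBSD-only `priv_sep` crate, which is what the commit message means by "add pledge and unveil". For readers unfamiliar with those system calls, here is a minimal sketch of what they do, written against the `libc` crate's raw OpenBSD bindings rather than against `priv_sep` itself (whose actual API may differ); the data-directory path and the promise list are illustrative assumptions, not the values this server uses.

```rust
// Minimal sketch of pledge(2)/unveil(2) on OpenBSD via the `libc` crate.
// Illustrative only: the real `priv_sep` crate wraps these calls behind
// its own API. Compiles only when target_os = "openbsd".
#[cfg(target_os = "openbsd")]
fn restrict_process() -> std::io::Result<()> {
    use std::ffi::CString;
    use std::io::Error;
    use std::ptr;

    // Hypothetical path: expose only the data directory (read/write/create);
    // every other path disappears from the process's view of the filesystem.
    let path = CString::new("/var/vaultwarden/data").expect("no interior NUL");
    let perms = CString::new("rwc").expect("no interior NUL");
    // SAFETY: both pointers are valid, NUL-terminated C strings.
    if unsafe { libc::unveil(path.as_ptr(), perms.as_ptr()) } != 0 {
        return Err(Error::last_os_error());
    }

    // Illustrative promise set: stdio, file I/O, sockets, DNS, file locks.
    // Pledging without the "unveil" promise also locks the unveil view.
    let promises = CString::new("stdio rpath wpath cpath inet dns flock")
        .expect("no interior NUL");
    // SAFETY: `promises` is valid; a null execpromises leaves that set unchanged.
    if unsafe { libc::pledge(promises.as_ptr(), ptr::null()) } != 0 {
        return Err(Error::last_os_error());
    }
    Ok(())
}
```

After a successful `pledge`, any system call outside the promised set aborts the process, and after `unveil` the filesystem shrinks to the exposed paths, so a compromise of the server is confined to its data directory and its sockets.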
- ---- - -## Features - -Basically full implementation of Bitwarden API is provided including: - - * Organizations support - * Attachments and Send - * Vault API support - * Serving the static files for Vault interface - * Website icons API - * Authenticator and U2F support - * YubiKey and Duo support - * Emergency Access - -## Installation -Pull the docker image and mount a volume from the host for persistent storage: - -```sh -docker pull vaultwarden/server:latest -docker run -d --name vaultwarden -v /vw-data/:/data/ --restart unless-stopped -p 80:80 vaultwarden/server:latest -``` -This will preserve any persistent data under /vw-data/, you can adapt the path to whatever suits you. - -**IMPORTANT**: Most modern web browsers disallow the use of Web Crypto APIs in insecure contexts. In this case, you might get an error like `Cannot read property 'importKey'`. To solve this problem, you need to access the web vault via HTTPS or localhost. - -This can be configured in [vaultwarden directly](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-HTTPS) or using a third-party reverse proxy ([some examples](https://github.com/dani-garcia/vaultwarden/wiki/Proxy-examples)). - -If you have an available domain name, you can get HTTPS certificates with [Let's Encrypt](https://letsencrypt.org/), or you can generate self-signed certificates with utilities like [mkcert](https://github.com/FiloSottile/mkcert). Some proxies automatically do this step, like Caddy (see examples linked above). - -## Usage -See the [vaultwarden wiki](https://github.com/dani-garcia/vaultwarden/wiki) for more information on how to configure and run the vaultwarden server. - -## Get in touch -To ask a question, offer suggestions or new features or to get help configuring or installing the software, please use [GitHub Discussions](https://github.com/dani-garcia/vaultwarden/discussions) or [the forum](https://vaultwarden.discourse.group/). - -If you spot any bugs or crashes with vaultwarden itself, please [create an issue](https://github.com/dani-garcia/vaultwarden/issues/). Make sure you are on the latest version and there aren't any similar issues open, though! - -If you prefer to chat, we're usually hanging around at [#vaultwarden:matrix.org](https://matrix.to/#/#vaultwarden:matrix.org) room on Matrix. Feel free to join us! - -### Sponsors -Thanks for your contribution to the project! - -<!-- -<table> - <tr> - <td align="center"> - <a href="https://github.com/username"> - <img src="https://avatars.githubusercontent.com/u/725423?s=75&v=4" width="75px;" alt="username"/> - <br /> - <sub><b>username</b></sub> - </a> - </td> - </tr> -</table> - -<br/> ---> - -<table> - <tr> - <td align="center"> - <a href="https://github.com/themightychris" style="width: 75px"> - <sub><b>Chris Alfano</b></sub> - </a> - </td> - </tr> - <tr> - <td align="center"> - <a href="https://github.com/numberly" style="width: 75px"> - <sub><b>Numberly</b></sub> - </a> - </td> - </tr> -</table> diff --git a/SECURITY.md b/SECURITY.md @@ -1,45 +0,0 @@ -Vaultwarden tries to prevent security issues but there could always slip something through. -If you believe you've found a security issue in our application, we encourage you to -notify us. We welcome working with you to resolve the issue promptly. Thanks in advance! - -# Disclosure Policy - -- Let us know as soon as possible upon discovery of a potential security issue, and we'll make every - effort to quickly resolve the issue. 
-- Provide us a reasonable amount of time to resolve the issue before any disclosure to the public or a
-  third-party. We may publicly disclose the issue before resolving it, if appropriate.
-- Make a good faith effort to avoid privacy violations, destruction of data, and interruption or
-  degradation of our service. Only interact with accounts you own or with explicit permission of the
-  account holder.
-
-# In-scope
-
-- Security issues in any current release of Vaultwarden. Source code is available at https://github.com/dani-garcia/vaultwarden. This includes the current `latest` release and `main / testing` release.
-
-# Exclusions
-
-The following bug classes are out-of-scope:
-
-- Bugs that are already reported on Vaultwarden's issue tracker (https://github.com/dani-garcia/vaultwarden/issues)
-- Bugs that are not part of Vaultwarden, like on the web-vault or mobile and desktop clients. These issues need to be reported in the respective project issue tracker at https://github.com/bitwarden, with which we are not associated
-- Issues in an upstream software dependency (e.g., Rust or external libraries) which are already reported to the upstream maintainer
-- Attacks requiring physical access to a user's device
-- Issues related to software or protocols not under Vaultwarden's control
-- Vulnerabilities in outdated versions of Vaultwarden
-- Missing security best practices that do not directly lead to a vulnerability (you may still report them as a normal issue)
-- Issues that do not have any impact on the general public
-
-While researching, we'd like to ask you to refrain from:
-
-- Denial of service
-- Spamming
-- Social engineering (including phishing) of Vaultwarden developers, contributors or users
-
-Thank you for helping keep Vaultwarden and our users safe!
-
-# How to contact us
-
-- You can contact us on Matrix https://matrix.to/#/#vaultwarden:matrix.org (user: `@danig:matrix.org`)
-- You can send an ![security-contact](/.github/security-contact.gif) to report a security issue.
-  - If you want to send an encrypted email you can use the following GPG key:<br>
-    https://keyserver.ubuntu.com/pks/lookup?search=0xB9B7A108373276BF3C0406F9FC8A7D14C3CD543A&fingerprint=on&op=index
diff --git a/build.rs b/build.rs
@@ -1,80 +1,3 @@
-use std::env;
-use std::process::Command;
-
 fn main() {
-    // This allows using #[cfg(sqlite)] instead of #[cfg(feature = "sqlite")], which helps when trying to add them through macros
-    #[cfg(feature = "sqlite")]
-    println!("cargo:rustc-cfg=sqlite");
-    #[cfg(feature = "mysql")]
-    println!("cargo:rustc-cfg=mysql");
-    #[cfg(feature = "postgresql")]
-    println!("cargo:rustc-cfg=postgresql");
-    #[cfg(feature = "query_logger")]
-    println!("cargo:rustc-cfg=query_logger");
-
-    #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
-    compile_error!(
-        "You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
-    );
-
-    #[cfg(all(not(debug_assertions), feature = "query_logger"))]
-    compile_error!("Query Logging is only allowed during development, it is not intended for production usage!");
-
-    // Support $BWRS_VERSION for legacy compatibility, but default to $VW_VERSION.
-    // If neither exists, read from git.
- let maybe_vaultwarden_version = - env::var("VW_VERSION").or_else(|_| env::var("BWRS_VERSION")).or_else(|_| version_from_git_info()); - - if let Ok(version) = maybe_vaultwarden_version { - println!("cargo:rustc-env=VW_VERSION={version}"); - println!("cargo:rustc-env=CARGO_PKG_VERSION={version}"); - } -} - -fn run(args: &[&str]) -> Result<String, std::io::Error> { - let out = Command::new(args[0]).args(&args[1..]).output()?; - if !out.status.success() { - use std::io::{Error, ErrorKind}; - return Err(Error::new(ErrorKind::Other, "Command not successful")); - } - Ok(String::from_utf8(out.stdout).unwrap().trim().to_string()) -} - -/// This method reads info from Git, namely tags, branch, and revision -/// To access these values, use: -/// - env!("GIT_EXACT_TAG") -/// - env!("GIT_LAST_TAG") -/// - env!("GIT_BRANCH") -/// - env!("GIT_REV") -/// - env!("VW_VERSION") -fn version_from_git_info() -> Result<String, std::io::Error> { - // The exact tag for the current commit, can be empty when - // the current commit doesn't have an associated tag - let exact_tag = run(&["git", "describe", "--abbrev=0", "--tags", "--exact-match"]).ok(); - if let Some(ref exact) = exact_tag { - println!("cargo:rustc-env=GIT_EXACT_TAG={exact}"); - } - - // The last available tag, equal to exact_tag when - // the current commit is tagged - let last_tag = run(&["git", "describe", "--abbrev=0", "--tags"])?; - println!("cargo:rustc-env=GIT_LAST_TAG={last_tag}"); - - // The current branch name - let branch = run(&["git", "rev-parse", "--abbrev-ref", "HEAD"])?; - println!("cargo:rustc-env=GIT_BRANCH={branch}"); - - // The current git commit hash - let rev = run(&["git", "rev-parse", "HEAD"])?; - let rev_short = rev.get(..8).unwrap_or_default(); - println!("cargo:rustc-env=GIT_REV={rev_short}"); - - // Combined version - if let Some(exact) = exact_tag { - Ok(exact) - } else if &branch != "main" && &branch != "master" && &branch != "HEAD" { - Ok(format!("{last_tag}-{rev_short} ({branch})")) - } else { - Ok(format!("{last_tag}-{rev_short}")) - } } diff --git a/diesel.toml b/diesel.toml @@ -2,4 +2,4 @@ # see diesel.rs/guides/configuring-diesel-cli [print_schema] -file = "src/db/schema.rs" -\ No newline at end of file +file = "src/db/schema.rs" diff --git a/docker/DockerSettings.yaml b/docker/DockerSettings.yaml @@ -1,28 +0,0 @@ ---- -vault_version: "v2023.10.0" -vault_image_digest: "sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935" -# Cross Compile Docker Helper Scripts v1.3.0 -# We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts -xx_image_digest: "sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc" -rust_version: 1.73.0 # Rust version to be used -debian_version: bookworm # Debian release name to be used -alpine_version: 3.18 # Alpine version to be used -# For which platforms/architectures will we try to build images -platforms: ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"] -# Determine the build images per OS/Arch -build_stage_image: - debian: - image: "docker.io/library/rust:{{rust_version}}-slim-{{debian_version}}" - platform: "$BUILDPLATFORM" - alpine: - image: "build_${TARGETARCH}${TARGETVARIANT}" - platform: "linux/amd64" # The Alpine build images only have linux/amd64 images - arch_image: - amd64: "ghcr.io/blackdex/rust-musl:x86_64-musl-stable-{{rust_version}}" - arm64: "ghcr.io/blackdex/rust-musl:aarch64-musl-stable-{{rust_version}}" - armv7: 
"ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-{{rust_version}}" - armv6: "ghcr.io/blackdex/rust-musl:arm-musleabi-stable-{{rust_version}}" -# The final image which will be used to distribute the container images -runtime_stage_image: - debian: "docker.io/library/debian:{{debian_version}}-slim" - alpine: "docker.io/library/alpine:{{alpine_version}}" diff --git a/docker/Dockerfile.alpine b/docker/Dockerfile.alpine @@ -1,160 +0,0 @@ -# syntax=docker/dockerfile:1 - -# This file was generated using a Jinja2 template. -# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make` -# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine` - -# Using multistage build: -# https://docs.docker.com/develop/develop-images/multistage-build/ -# https://whitfin.io/speeding-up-rust-docker-builds/ - -####################### VAULT BUILD IMAGE ####################### -# The web-vault digest specifies a particular web-vault build on Docker Hub. -# Using the digest instead of the tag name provides better security, -# as the digest of an image is immutable, whereas a tag name can later -# be changed to point to a malicious image. -# -# To verify the current digest for a given tag name: -# - From https://hub.docker.com/r/vaultwarden/web-vault/tags, -# click the tag name to view the digest of the image it currently points to. -# - From the command line: -# $ docker pull docker.io/vaultwarden/web-vault:v2023.10.0 -# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.10.0 -# [docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935] -# -# - Conversely, to get the tag name from the digest: -# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 -# [docker.io/vaultwarden/web-vault:v2023.10.0] -# -FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 as vault - -########################## ALPINE BUILD IMAGES ########################## -## NOTE: The Alpine Base Images do not support other platforms then linux/amd64 -## And for Alpine we define all build images here, they will only be loaded when actually used -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.73.0 as build_amd64 -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.73.0 as build_arm64 -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.73.0 as build_armv7 -FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.73.0 as build_armv6 - -########################## BUILD IMAGE ########################## -# hadolint ignore=DL3006 -FROM --platform=linux/amd64 build_${TARGETARCH}${TARGETVARIANT} as build -ARG TARGETARCH -ARG TARGETVARIANT -ARG TARGETPLATFORM - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -# Build time options to avoid dpkg warnings and help with reproducible builds. 
-ENV DEBIAN_FRONTEND=noninteractive \ - LANG=C.UTF-8 \ - TZ=UTC \ - TERM=xterm-256color \ - CARGO_HOME="/root/.cargo" \ - USER="root" \ - # Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11 - # Debian Bookworm already contains libpq v15 - PQ_LIB_DIR="/usr/local/musl/pq15/lib" - - -# Create CARGO_HOME folder and don't download rust docs -RUN mkdir -pv "${CARGO_HOME}" \ - && rustup set profile minimal - -# Creates a dummy project used to grab dependencies -RUN USER=root cargo new --bin /app -WORKDIR /app - -# Shared variables across Debian and Alpine -RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \ - # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic - if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \ - # Output the current contents of the file - cat /env-cargo - -# Enable MiMalloc to improve performance on Alpine builds -ARG DB=sqlite,mysql,postgresql,enable_mimalloc - -RUN source /env-cargo && \ - rustup target add "${CARGO_TARGET}" - -ARG CARGO_PROFILE=release -ARG VW_VERSION - -# Copies over *only* your manifests and build files -COPY ./Cargo.* ./ -COPY ./rust-toolchain.toml ./rust-toolchain.toml -COPY ./build.rs ./build.rs - -# Builds your dependencies and removes the -# dummy project, except the target folder -# This folder contains the compiled dependencies -RUN source /env-cargo && \ - cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \ - find . -not -path "./target*" -delete - -# Copies the complete project -# To avoid copying unneeded files, use .dockerignore -COPY . . - -# Builds again, this time it will be the actual source files being build -RUN source /env-cargo && \ - # Make sure that we actually build the project by updating the src/main.rs timestamp - touch src/main.rs && \ - # Create a symlink to the binary target folder to easy copy the binary in the final stage - cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \ - if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \ - ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \ - else \ - ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \ - fi - - -######################## RUNTIME IMAGE ######################## -# Create a new stage with a minimal image -# because we already have a binary built -# -# To build these images you need to have qemu binfmt support. -# See the following pages to help install these tools locally -# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation -# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64 -# -# Or use a Docker image which modifies your host system to support this. -# The GitHub Actions Workflow uses the same image as used below. 
-# See: https://github.com/tonistiigi/binfmt -# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm -# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*' -# -# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742 -FROM --platform=$TARGETPLATFORM docker.io/library/alpine:3.18 - -ENV ROCKET_PROFILE="release" \ - ROCKET_ADDRESS=0.0.0.0 \ - ROCKET_PORT=80 \ - SSL_CERT_DIR=/etc/ssl/certs - -# Create data folder and Install needed libraries -RUN mkdir /data && \ - apk --no-cache add \ - ca-certificates \ - curl \ - openssl \ - tzdata - -VOLUME /data -EXPOSE 80 -EXPOSE 3012 - -# Copies the files from the context (Rocket.toml file and web-vault) -# and the binary from the "build" stage to the current stage -WORKDIR / - -COPY docker/healthcheck.sh /healthcheck.sh -COPY docker/start.sh /start.sh - -COPY --from=vault /web-vault ./web-vault -COPY --from=build /app/target/final/vaultwarden . - -HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] - -CMD ["/start.sh"] diff --git a/docker/Dockerfile.debian b/docker/Dockerfile.debian @@ -1,194 +0,0 @@ -# syntax=docker/dockerfile:1 - -# This file was generated using a Jinja2 template. -# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make` -# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine` - -# Using multistage build: -# https://docs.docker.com/develop/develop-images/multistage-build/ -# https://whitfin.io/speeding-up-rust-docker-builds/ - -####################### VAULT BUILD IMAGE ####################### -# The web-vault digest specifies a particular web-vault build on Docker Hub. -# Using the digest instead of the tag name provides better security, -# as the digest of an image is immutable, whereas a tag name can later -# be changed to point to a malicious image. -# -# To verify the current digest for a given tag name: -# - From https://hub.docker.com/r/vaultwarden/web-vault/tags, -# click the tag name to view the digest of the image it currently points to. 
-# - From the command line: -# $ docker pull docker.io/vaultwarden/web-vault:v2023.10.0 -# $ docker image inspect --format "{{.RepoDigests}}" docker.io/vaultwarden/web-vault:v2023.10.0 -# [docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935] -# -# - Conversely, to get the tag name from the digest: -# $ docker image inspect --format "{{.RepoTags}}" docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 -# [docker.io/vaultwarden/web-vault:v2023.10.0] -# -FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:419e4976921f98f1124f296ed02e68bf7f8ff29b3f1fba59e7e715228a065935 as vault - -########################## Cross Compile Docker Helper Scripts ########################## -## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts -## And these bash scripts do not have any significant difference if at all -FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:c9609ace652bbe51dd4ce90e0af9d48a4590f1214246da5bc70e46f6dd586edc AS xx - -########################## BUILD IMAGE ########################## -# hadolint ignore=DL3006 -FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.73.0-slim-bookworm as build -COPY --from=xx / / -ARG TARGETARCH -ARG TARGETVARIANT -ARG TARGETPLATFORM - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -# Build time options to avoid dpkg warnings and help with reproducible builds. -ENV DEBIAN_FRONTEND=noninteractive \ - LANG=C.UTF-8 \ - TZ=UTC \ - TERM=xterm-256color \ - CARGO_HOME="/root/.cargo" \ - USER="root" - -# Install clang to get `xx-cargo` working -# Install pkg-config to allow amd64 builds to find all libraries -# Install git so build.rs can determine the correct version -# Install the libc cross packages based upon the debian-arch -RUN apt-get update && \ - apt-get install -y \ - --no-install-recommends \ - clang \ - pkg-config \ - git \ - "libc6-$(xx-info debian-arch)-cross" \ - "libc6-dev-$(xx-info debian-arch)-cross" \ - "linux-libc-dev-$(xx-info debian-arch)-cross" && \ - # Run xx-cargo early, since it sometimes seems to break when run at a later stage - echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo - -RUN xx-apt-get install -y \ - --no-install-recommends \ - gcc \ - libmariadb3 \ - libpq-dev \ - libpq5 \ - libssl-dev && \ - # Force install arch dependend mariadb dev packages - # Installing them the normal way breaks several other packages (again) - apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \ - dpkg --force-all -i ./libmariadb-dev*.deb - -# Create CARGO_HOME folder and don't download rust docs -RUN mkdir -pv "${CARGO_HOME}" \ - && rustup set profile minimal - -# Creates a dummy project used to grab dependencies -RUN USER=root cargo new --bin /app -WORKDIR /app - -# Environment variables for cargo across Debian and Alpine -RUN source /env-cargo && \ - if xx-info is-cross ; then \ - # We can't use xx-cargo since that uses clang, which doesn't work for our libraries. - # Because of this we generate the needed environment variables here which we can load in the needed steps. 
- echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \ - echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \ - echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \ - echo "export CROSS_COMPILE=1" >> /env-cargo && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \ - fi && \ - # Output the current contents of the file - cat /env-cargo - -# Configure the DB ARG as late as possible to not invalidate the cached layers above -ARG DB=sqlite,mysql,postgresql - -RUN source /env-cargo && \ - rustup target add "${CARGO_TARGET}" - -ARG CARGO_PROFILE=release -ARG VW_VERSION - -# Copies over *only* your manifests and build files -COPY ./Cargo.* ./ -COPY ./rust-toolchain.toml ./rust-toolchain.toml -COPY ./build.rs ./build.rs - -# Builds your dependencies and removes the -# dummy project, except the target folder -# This folder contains the compiled dependencies -RUN source /env-cargo && \ - cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \ - find . -not -path "./target*" -delete - -# Copies the complete project -# To avoid copying unneeded files, use .dockerignore -COPY . . - -# Builds again, this time it will be the actual source files being build -RUN source /env-cargo && \ - # Make sure that we actually build the project by updating the src/main.rs timestamp - touch src/main.rs && \ - # Create a symlink to the binary target folder to easy copy the binary in the final stage - cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \ - if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \ - ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \ - else \ - ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \ - fi - - -######################## RUNTIME IMAGE ######################## -# Create a new stage with a minimal image -# because we already have a binary built -# -# To build these images you need to have qemu binfmt support. -# See the following pages to help install these tools locally -# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation -# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64 -# -# Or use a Docker image which modifies your host system to support this. -# The GitHub Actions Workflow uses the same image as used below. 
-# See: https://github.com/tonistiigi/binfmt -# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm -# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*' -# -# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742 -FROM --platform=$TARGETPLATFORM docker.io/library/debian:bookworm-slim - -ENV ROCKET_PROFILE="release" \ - ROCKET_ADDRESS=0.0.0.0 \ - ROCKET_PORT=80 \ - DEBIAN_FRONTEND=noninteractive - -# Create data folder and Install needed libraries -RUN mkdir /data && \ - apt-get update && apt-get install -y \ - --no-install-recommends \ - ca-certificates \ - curl \ - libmariadb-dev-compat \ - libpq5 \ - openssl && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -VOLUME /data -EXPOSE 80 -EXPOSE 3012 - -# Copies the files from the context (Rocket.toml file and web-vault) -# and the binary from the "build" stage to the current stage -WORKDIR / - -COPY docker/healthcheck.sh /healthcheck.sh -COPY docker/start.sh /start.sh - -COPY --from=vault /web-vault ./web-vault -COPY --from=build /app/target/final/vaultwarden . - -HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"] - -CMD ["/start.sh"] diff --git a/docker/Dockerfile.j2 b/docker/Dockerfile.j2 @@ -1,237 +0,0 @@ -# syntax=docker/dockerfile:1 - -# This file was generated using a Jinja2 template. -# Please make your changes in `DockerSettings.yaml` or `Dockerfile.j2` and then `make` -# This will generate two Dockerfile's `Dockerfile.debian` and `Dockerfile.alpine` - -# Using multistage build: -# https://docs.docker.com/develop/develop-images/multistage-build/ -# https://whitfin.io/speeding-up-rust-docker-builds/ - -####################### VAULT BUILD IMAGE ####################### -# The web-vault digest specifies a particular web-vault build on Docker Hub. -# Using the digest instead of the tag name provides better security, -# as the digest of an image is immutable, whereas a tag name can later -# be changed to point to a malicious image. -# -# To verify the current digest for a given tag name: -# - From https://hub.docker.com/r/vaultwarden/web-vault/tags, -# click the tag name to view the digest of the image it currently points to. 
-# - From the command line: -# $ docker pull docker.io/vaultwarden/web-vault:{{ vault_version }} -# $ docker image inspect --format "{{ '{{' }}.RepoDigests}}" docker.io/vaultwarden/web-vault:{{ vault_version }} -# [docker.io/vaultwarden/web-vault@{{ vault_image_digest }}] -# -# - Conversely, to get the tag name from the digest: -# $ docker image inspect --format "{{ '{{' }}.RepoTags}}" docker.io/vaultwarden/web-vault@{{ vault_image_digest }} -# [docker.io/vaultwarden/web-vault:{{ vault_version }}] -# -FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@{{ vault_image_digest }} as vault - -{% if base == "debian" %} -########################## Cross Compile Docker Helper Scripts ########################## -## We use the linux/amd64 no matter which Build Platform, since these are all bash scripts -## And these bash scripts do not have any significant difference if at all -FROM --platform=linux/amd64 docker.io/tonistiigi/xx@{{ xx_image_digest }} AS xx -{% elif base == "alpine" %} -########################## ALPINE BUILD IMAGES ########################## -## NOTE: The Alpine Base Images do not support other platforms then linux/amd64 -## And for Alpine we define all build images here, they will only be loaded when actually used -{% for arch in build_stage_image[base].arch_image %} -FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].arch_image[arch] }} as build_{{ arch }} -{% endfor %} -{% endif %} - -########################## BUILD IMAGE ########################## -# hadolint ignore=DL3006 -FROM --platform={{ build_stage_image[base].platform }} {{ build_stage_image[base].image }} as build -{% if base == "debian" %} -COPY --from=xx / / -{% endif %} -ARG TARGETARCH -ARG TARGETVARIANT -ARG TARGETPLATFORM - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -# Build time options to avoid dpkg warnings and help with reproducible builds. 
-ENV DEBIAN_FRONTEND=noninteractive \ - LANG=C.UTF-8 \ - TZ=UTC \ - TERM=xterm-256color \ - CARGO_HOME="/root/.cargo" \ - USER="root" -{%- if base == "alpine" %} \ - # Use PostgreSQL v15 during Alpine/MUSL builds instead of the default v11 - # Debian Bookworm already contains libpq v15 - PQ_LIB_DIR="/usr/local/musl/pq15/lib" -{% endif %} - -{% if base == "debian" %} - -# Install clang to get `xx-cargo` working -# Install pkg-config to allow amd64 builds to find all libraries -# Install git so build.rs can determine the correct version -# Install the libc cross packages based upon the debian-arch -RUN apt-get update && \ - apt-get install -y \ - --no-install-recommends \ - clang \ - pkg-config \ - git \ - "libc6-$(xx-info debian-arch)-cross" \ - "libc6-dev-$(xx-info debian-arch)-cross" \ - "linux-libc-dev-$(xx-info debian-arch)-cross" && \ - # Run xx-cargo early, since it sometimes seems to break when run at a later stage - echo "export CARGO_TARGET=$(xx-cargo --print-target-triple)" >> /env-cargo - -RUN xx-apt-get install -y \ - --no-install-recommends \ - gcc \ - libmariadb3 \ - libpq-dev \ - libpq5 \ - libssl-dev && \ - # Force install arch dependend mariadb dev packages - # Installing them the normal way breaks several other packages (again) - apt-get download "libmariadb-dev-compat:$(xx-info debian-arch)" "libmariadb-dev:$(xx-info debian-arch)" && \ - dpkg --force-all -i ./libmariadb-dev*.deb -{% endif %} - -# Create CARGO_HOME folder and don't download rust docs -RUN mkdir -pv "${CARGO_HOME}" \ - && rustup set profile minimal - -# Creates a dummy project used to grab dependencies -RUN USER=root cargo new --bin /app -WORKDIR /app - -{% if base == "debian" %} -# Environment variables for cargo across Debian and Alpine -RUN source /env-cargo && \ - if xx-info is-cross ; then \ - # We can't use xx-cargo since that uses clang, which doesn't work for our libraries. - # Because of this we generate the needed environment variables here which we can load in the needed steps. 
- echo "export CC_$(echo "${CARGO_TARGET}" | tr '[:upper:]' '[:lower:]' | tr - _)=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \ - echo "export CARGO_TARGET_$(echo "${CARGO_TARGET}" | tr '[:lower:]' '[:upper:]' | tr - _)_LINKER=/usr/bin/$(xx-info)-gcc" >> /env-cargo && \ - echo "export PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /env-cargo && \ - echo "export CROSS_COMPILE=1" >> /env-cargo && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/$(xx-info)" >> /env-cargo && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/$(xx-info)" >> /env-cargo ; \ - fi && \ - # Output the current contents of the file - cat /env-cargo - -# Configure the DB ARG as late as possible to not invalidate the cached layers above -ARG DB=sqlite,mysql,postgresql -{% elif base == "alpine" %} -# Shared variables across Debian and Alpine -RUN echo "export CARGO_TARGET=${RUST_MUSL_CROSS_TARGET}" >> /env-cargo && \ - # To be able to build the armv6 image with mimalloc we need to tell the linker to also look for libatomic - if [[ "${TARGETARCH}${TARGETVARIANT}" == "armv6" ]] ; then echo "export RUSTFLAGS='-Clink-arg=-latomic'" >> /env-cargo ; fi && \ - # Output the current contents of the file - cat /env-cargo - -# Enable MiMalloc to improve performance on Alpine builds -ARG DB=sqlite,mysql,postgresql,enable_mimalloc -{% endif %} - -RUN source /env-cargo && \ - rustup target add "${CARGO_TARGET}" - -ARG CARGO_PROFILE=release -ARG VW_VERSION - -# Copies over *only* your manifests and build files -COPY ./Cargo.* ./ -COPY ./rust-toolchain.toml ./rust-toolchain.toml -COPY ./build.rs ./build.rs - -# Builds your dependencies and removes the -# dummy project, except the target folder -# This folder contains the compiled dependencies -RUN source /env-cargo && \ - cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \ - find . -not -path "./target*" -delete - -# Copies the complete project -# To avoid copying unneeded files, use .dockerignore -COPY . . - -# Builds again, this time it will be the actual source files being build -RUN source /env-cargo && \ - # Make sure that we actually build the project by updating the src/main.rs timestamp - touch src/main.rs && \ - # Create a symlink to the binary target folder to easy copy the binary in the final stage - cargo build --features ${DB} --profile "${CARGO_PROFILE}" --target="${CARGO_TARGET}" && \ - if [[ "${CARGO_PROFILE}" == "dev" ]] ; then \ - ln -vfsr "/app/target/${CARGO_TARGET}/debug" /app/target/final ; \ - else \ - ln -vfsr "/app/target/${CARGO_TARGET}/${CARGO_PROFILE}" /app/target/final ; \ - fi - - -######################## RUNTIME IMAGE ######################## -# Create a new stage with a minimal image -# because we already have a binary built -# -# To build these images you need to have qemu binfmt support. -# See the following pages to help install these tools locally -# Ubuntu/Debian: https://wiki.debian.org/QemuUserEmulation -# Arch Linux: https://wiki.archlinux.org/title/QEMU#Chrooting_into_arm/arm64_environment_from_x86_64 -# -# Or use a Docker image which modifies your host system to support this. -# The GitHub Actions Workflow uses the same image as used below. 
-# See: https://github.com/tonistiigi/binfmt
-# Usage: docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
-# To uninstall: docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
-#
-# We need to add `--platform` here, because of a podman bug: https://github.com/containers/buildah/issues/4742
-FROM --platform=$TARGETPLATFORM {{ runtime_stage_image[base] }}
-
-ENV ROCKET_PROFILE="release" \
-    ROCKET_ADDRESS=0.0.0.0 \
-    ROCKET_PORT=80
-{%- if base == "debian" %} \
-    DEBIAN_FRONTEND=noninteractive
-{% elif base == "alpine" %} \
-    SSL_CERT_DIR=/etc/ssl/certs
-{% endif %}
-
-# Create the data folder and install needed libraries
-RUN mkdir /data && \
-{% if base == "debian" %}
-    apt-get update && apt-get install -y \
-        --no-install-recommends \
-        ca-certificates \
-        curl \
-        libmariadb-dev-compat \
-        libpq5 \
-        openssl && \
-    apt-get clean && \
-    rm -rf /var/lib/apt/lists/*
-{% elif base == "alpine" %}
-    apk --no-cache add \
-        ca-certificates \
-        curl \
-        openssl \
-        tzdata
-{% endif %}
-
-VOLUME /data
-EXPOSE 80
-EXPOSE 3012
-
-# Copies the files from the context (Rocket.toml file and web-vault)
-# and the binary from the "build" stage to the current stage
-WORKDIR /
-
-COPY docker/healthcheck.sh /healthcheck.sh
-COPY docker/start.sh /start.sh
-
-COPY --from=vault /web-vault ./web-vault
-COPY --from=build /app/target/final/vaultwarden .
-
-HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
-
-CMD ["/start.sh"]
diff --git a/docker/Makefile b/docker/Makefile
@@ -1,4 +0,0 @@
-all:
-	./render_template Dockerfile.j2 '{"base": "debian"}' > Dockerfile.debian
-	./render_template Dockerfile.j2 '{"base": "alpine"}' > Dockerfile.alpine
-.PHONY: all
diff --git a/docker/README.md b/docker/README.md
@@ -1,183 +0,0 @@
-# Vaultwarden Container Building
-
-To build and release new testing and stable releases of Vaultwarden we use `docker buildx bake`.<br>
-This can be used locally by running the command yourself, but it is also used by GitHub Actions.
-
-This makes it easier for us to test and maintain the different architectures we provide.<br>
-We also have just two Dockerfiles: one for Debian and one for Alpine based images.<br>
-With just these two files we can build both Debian and Alpine images for the following platforms:
- - amd64 (linux/amd64)
- - arm64 (linux/arm64)
- - armv7 (linux/arm/v7)
- - armv6 (linux/arm/v6)
-
-To build these containers you need to enable QEMU binfmt support to be able to run/emulate architectures which are different from your host.<br>
-This ensures the container build process can run binaries from other architectures.<br>
-
-**NOTE**: Run all the examples below from the root of the repo.<br>
-
-
-## How to install QEMU binfmt support
-
-This is different per host OS, but most support this in some way.<br>
-
-### Ubuntu/Debian
-```bash
-apt install binfmt-support qemu-user-static
-```
-
-### Arch Linux (and others based upon it)
-```bash
-pacman -S qemu-user-static qemu-user-static-binfmt
-```
-
-### Fedora
-```bash
-dnf install qemu-user-static
-```
-
-### Others
-There is also an option to use another Docker container to provide support for this.
-```bash
-# To install and activate
-docker run --privileged --rm tonistiigi/binfmt --install arm64,arm
-# To uninstall
-docker run --privileged --rm tonistiigi/binfmt --uninstall 'qemu-*'
-```
-
-
-## Single architecture container building
-
-You can build a container per supported architecture as long as you have QEMU binfmt support installed on your system.<br>
-
-```bash
-# Default bake triggers a Debian build using the host's architecture
-docker buildx bake --file docker/docker-bake.hcl
-
-# Bake Debian ARM64 using a debug build
-CARGO_PROFILE=dev \
-SOURCE_COMMIT="$(git rev-parse HEAD)" \
-docker buildx bake --file docker/docker-bake.hcl debian-arm64
-
-# Bake Alpine ARMv6 as a release build
-SOURCE_COMMIT="$(git rev-parse HEAD)" \
-docker buildx bake --file docker/docker-bake.hcl alpine-armv6
-```
-
-
-## Local Multi Architecture container building
-
-Start the initialization; this only needs to be done once.
-
-```bash
-# Create and use a new buildx builder instance which connects to the host network
-docker buildx create --name vaultwarden --use --driver-opt network=host
-
-# Validate it runs
-docker buildx inspect --bootstrap
-
-# Create a local container registry directly reachable on the localhost
-docker run -d --name registry --network host registry:2
-```
-
-After that is done, you should be able to build and push to the local registry.<br>
-Use the following command with the modified variables to bake the Alpine images.<br>
-Replace `alpine` with `debian` if you want to build the Debian multi arch images.
-
-```bash
-# Start a buildx bake using a debug build
-CARGO_PROFILE=dev \
-SOURCE_COMMIT="$(git rev-parse HEAD)" \
-CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
-docker buildx bake --file docker/docker-bake.hcl alpine-multi
-```
-
-
-## Using the `bake.sh` script
-
-To make it a bit easier to trigger a build, there is also a `bake.sh` script.<br>
-This script calls `docker buildx bake` with all the right parameters and also generates the `SOURCE_COMMIT` and `SOURCE_VERSION` variables.<br>
-This script can be called from either the repo root or the docker directory.
-
-So, if you want to build a Multi Arch Alpine container and push it to your localhost registry, you can run this from within the docker directory. (Just make sure you executed the initialization steps above first.)
-```bash
-CONTAINER_REGISTRIES="localhost:5000/vaultwarden/server" \
-./bake.sh alpine-multi
-```
-
-Or if you just want to build a Debian container from the repo root, you can run this.
-```bash
-docker/bake.sh
-```
-
-You can append both `alpine` and `debian` with `-amd64`, `-arm64`, `-armv7` or `-armv6`, which will trigger a build for that specific platform.<br>
-This will also append those values to the tag so you can see the built container when running `docker images`.
-
-You can also append extra arguments after the target if you want. This can be useful, for example, to print what bake will use.
-```bash
-docker/bake.sh alpine-all --print
-```
-
-### Testing baked images
-
-To test these images you can run them by using the correct tag and providing the platform.<br>
-For example, after you have built an arm64 image via `./bake.sh debian-arm64` you can run:
-```bash
-docker run --rm -it \
-  -e DISABLE_ADMIN_TOKEN=true \
-  -e I_REALLY_WANT_VOLATILE_STORAGE=true \
-  -p8080:80 --platform=linux/arm64 \
-  vaultwarden/server:testing-arm64
-```
-
-
-## Using the `podman-bake.sh` script
-
-To also make building easier using podman, there is a `podman-bake.sh` script.<br>
-This script calls `podman buildx build` with the needed parameters and, just like `bake.sh`, it generates some variables automatically.<br>
-This script can be called from either the repo root or the docker directory.
-
-**NOTE:** Unlike the `bake.sh` script, this only supports a single `CONTAINER_REGISTRIES` value and a single `BASE_TAGS` value, not comma-separated values. It also only supports building separate architectures, not Multi Arch containers.
-
-To build an Alpine arm64 image with only sqlite support and mimalloc, run this:
-```bash
-DB="sqlite,enable_mimalloc" \
-./podman-bake.sh alpine-arm64
-```
-
-Or if you just want to build a Debian container from the repo root, you can run this.
-```bash
-docker/podman-bake.sh
-```
-
-You can append extra arguments after the target if you want. This can be useful, for example, to disable the cache like this.
-```bash
-./podman-bake.sh alpine-arm64 --no-cache
-```
-
-For the podman builds you can, just like with the `bake.sh` script, also append the architecture to build for that specific platform.<br>
-
-### Testing podman-built images
-
-The command to start a podman-built container is almost the same as for the docker/bake-built containers. The images start with `localhost/`, so you need to prepend that.
-
-```bash
-podman run --rm -it \
-  -e DISABLE_ADMIN_TOKEN=true \
-  -e I_REALLY_WANT_VOLATILE_STORAGE=true \
-  -p8080:80 --platform=linux/arm64 \
-  localhost/vaultwarden/server:testing-arm64
-```
-
-
-## Variables supported
-| Variable              | Default            | Description |
-| --------------------- | ------------------ | ----------- |
-| CARGO_PROFILE         | null               | Which cargo profile to use. `null` means what is defined in the Dockerfile |
-| DB                    | null               | Which `features` to build. `null` means what is defined in the Dockerfile |
-| SOURCE_REPOSITORY_URL | null               | The source repository from which this build was triggered |
-| SOURCE_COMMIT         | null               | The commit hash of the current commit for this build |
-| SOURCE_VERSION        | null               | The current exact tag of this commit, else the last tag and the first 8 chars of the source commit |
-| BASE_TAGS             | testing            | Tags to be used. Can be a comma-separated value like "latest,1.29.2" |
-| CONTAINER_REGISTRIES  | vaultwarden/server | Comma-separated value of container registries, like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server` |
-| VW_VERSION            | null               | Overrides the `SOURCE_VERSION` value. This is also used by the `build.rs` code, for example |
diff --git a/docker/bake.sh b/docker/bake.sh
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-
-# Determine the basedir of this script.
-# It should be located in the same directory as the docker-bake.hcl
-# This ensures you can run this script from both inside and outside of the docker directory
-BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")
-
-# Load the build envs
-source "${BASEDIR}/bake_env.sh"
-
-# Be verbose on what is being executed
-set -x
-
-# Make sure we set the context to `..` so it will go up one directory
-docker buildx bake --progress plain --set "*.context=${BASEDIR}/.." -f "${BASEDIR}/docker-bake.hcl" "$@"
diff --git a/docker/bake_env.sh b/docker/bake_env.sh
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-
-# If SOURCE_COMMIT is provided via env, skip this
-if [ -z "${SOURCE_COMMIT+x}" ]; then
-    SOURCE_COMMIT="$(git rev-parse HEAD)"
-fi
-
-# If VW_VERSION is provided via env, use it as SOURCE_VERSION
-# Else define it using git
-if [[ -n "${VW_VERSION}" ]]; then
-    SOURCE_VERSION="${VW_VERSION}"
-else
-    GIT_EXACT_TAG="$(git describe --tags --abbrev=0 --exact-match 2>/dev/null)"
-    if [[ -n "${GIT_EXACT_TAG}" ]]; then
-        SOURCE_VERSION="${GIT_EXACT_TAG}"
-    else
-        GIT_LAST_TAG="$(git describe --tags --abbrev=0)"
-        SOURCE_VERSION="${GIT_LAST_TAG}-${SOURCE_COMMIT:0:8}"
-        GIT_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
-        case "${GIT_BRANCH}" in
-            main|master|HEAD)
-                # Do not add the branch name for these branches
-                ;;
-            *)
-                SOURCE_VERSION="${SOURCE_VERSION} (${GIT_BRANCH})"
-                ;;
-        esac
-    fi
-fi
-
-# Export the rendered variables above so bake will use them
-export SOURCE_COMMIT
-export SOURCE_VERSION
diff --git a/docker/docker-bake.hcl b/docker/docker-bake.hcl
@@ -1,235 +0,0 @@
-// ==== Baking Variables ====
-
-// Set which cargo profile to use, dev or release for example
-// Use the value provided in the Dockerfile as default
-variable "CARGO_PROFILE" {
-  default = null
-}
-
-// Set which DBs (features) to enable
-// Use the value provided in the Dockerfile as default
-variable "DB" {
-  default = null
-}
-
-// The repository this build was triggered from
-variable "SOURCE_REPOSITORY_URL" {
-  default = null
-}
-
-// The commit hash of the current commit this build was triggered on
-variable "SOURCE_COMMIT" {
-  default = null
-}
-
-// The version of this build
-// Typically the current exact tag of this commit,
-// else the last tag and the first 8 characters of the source commit
-variable "SOURCE_VERSION" {
-  default = null
-}
-
-// This can be used to overwrite SOURCE_VERSION
-// It will be used during the build.rs building stage
-variable "VW_VERSION" {
-  default = null
-}
-
-// The base tag(s) to use
-// This can be a comma separated value like "testing,1.29.2"
-variable "BASE_TAGS" {
-  default = "testing"
-}
-
-// Which container registries should be used for the tagging
-// This can be a comma separated value
-// Use a full URI like `ghcr.io/dani-garcia/vaultwarden,docker.io/vaultwarden/server`
-variable "CONTAINER_REGISTRIES" {
-  default = "vaultwarden/server"
-}
-
-
-// ==== Baking Groups ====
-
-group "default" {
-  targets = ["debian"]
-}
-
-
-// ==== Shared Baking ====
-function "labels" {
-  params = []
-  result = {
-    "org.opencontainers.image.description"   = "Unofficial Bitwarden compatible server written in Rust - ${SOURCE_VERSION}"
-    "org.opencontainers.image.licenses"      = "AGPL-3.0-only"
-    "org.opencontainers.image.documentation" = "https://github.com/dani-garcia/vaultwarden/wiki"
-    "org.opencontainers.image.url"           = "https://github.com/dani-garcia/vaultwarden"
-    "org.opencontainers.image.created"       = "${formatdate("YYYY-MM-DD'T'hh:mm:ssZZZZZ", timestamp())}"
-    "org.opencontainers.image.source"        = "${SOURCE_REPOSITORY_URL}"
-    "org.opencontainers.image.revision"      = "${SOURCE_COMMIT}"
-    "org.opencontainers.image.version"       = "${SOURCE_VERSION}"
-  }
-}
-
-target "_default_attributes" {
-  labels = labels()
-  args = {
-    DB            = "${DB}"
-    CARGO_PROFILE = "${CARGO_PROFILE}"
-    VW_VERSION    = "${VW_VERSION}"
-  }
-}
-
-
-// ==== Debian Baking ====
-
-// Default Debian target, will build a container using the host's platform architecture
-target "debian" {
-  inherits = ["_default_attributes"]
-  dockerfile = "docker/Dockerfile.debian"
-  tags = generate_tags("", platform_tag())
-  output = [join(",", flatten([["type=docker"], image_index_annotations()]))]
-}
-
-// Multi Platform target, will build one tagged manifest with all supported architectures
-// This is mainly used by GitHub Actions to build and push new containers
-target "debian-multi" {
-  inherits = ["debian"]
-  platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
-  tags = generate_tags("", "")
-  output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
-}
-
-// Per platform targets, to individually test building per platform locally
-target "debian-amd64" {
-  inherits = ["debian"]
-  platforms = ["linux/amd64"]
-  tags = generate_tags("", "-amd64")
-}
-
-target "debian-arm64" {
-  inherits = ["debian"]
-  platforms = ["linux/arm64"]
-  tags = generate_tags("", "-arm64")
-}
-
-target "debian-armv7" {
-  inherits = ["debian"]
-  platforms = ["linux/arm/v7"]
-  tags = generate_tags("", "-armv7")
-}
-
-target "debian-armv6" {
-  inherits = ["debian"]
-  platforms = ["linux/arm/v6"]
-  tags = generate_tags("", "-armv6")
}
-
-// A group to build all platforms individually for local testing
-group "debian-all" {
-  targets = ["debian-amd64", "debian-arm64", "debian-armv7", "debian-armv6"]
-}
-
-
-// ==== Alpine Baking ====
-
-// Default Alpine target, will build a container using the host's platform architecture
-target "alpine" {
-  inherits = ["_default_attributes"]
-  dockerfile = "docker/Dockerfile.alpine"
-  tags = generate_tags("-alpine", platform_tag())
-  output = [join(",", flatten([["type=docker"], image_index_annotations()]))]
-}
-
-// Multi Platform target, will build one tagged manifest with all supported architectures
-// This is mainly used by GitHub Actions to build and push new containers
-target "alpine-multi" {
-  inherits = ["alpine"]
-  platforms = ["linux/amd64", "linux/arm64", "linux/arm/v7", "linux/arm/v6"]
-  tags = generate_tags("-alpine", "")
-  output = [join(",", flatten([["type=registry"], image_index_annotations()]))]
-}
-
-// Per platform targets, to individually test building per platform locally
-target "alpine-amd64" {
-  inherits = ["alpine"]
-  platforms = ["linux/amd64"]
-  tags = generate_tags("-alpine", "-amd64")
-}
-
-target "alpine-arm64" {
-  inherits = ["alpine"]
-  platforms = ["linux/arm64"]
-  tags = generate_tags("-alpine", "-arm64")
-}
-
-target "alpine-armv7" {
-  inherits = ["alpine"]
-  platforms = ["linux/arm/v7"]
-  tags = generate_tags("-alpine", "-armv7")
-}
-
-target "alpine-armv6" {
-  inherits = ["alpine"]
-  platforms = ["linux/arm/v6"]
-  tags = generate_tags("-alpine", "-armv6")
-}
-
-// A group to build all platforms individually for local testing
-group "alpine-all" {
-  targets = ["alpine-amd64", "alpine-arm64", "alpine-armv7", "alpine-armv6"]
-}
-
-
-// ==== Bake everything locally ====
-
-group "all" {
-  targets = ["debian-all", "alpine-all"]
-}
-
-
-// ==== Baking functions ====
- -// This will return the local platform as amd64, arm64 or armv7 for example -// It can be used for creating a local image tag -function "platform_tag" { - params = [] - result = "-${replace(replace(BAKE_LOCAL_PLATFORM, "linux/", ""), "/", "")}" -} - - -function "get_container_registries" { - params = [] - result = flatten(split(",", CONTAINER_REGISTRIES)) -} - -function "get_base_tags" { - params = [] - result = flatten(split(",", BASE_TAGS)) -} - -function "generate_tags" { - params = [ - suffix, // What to append to the BASE_TAG when needed, like `-alpine` for example - platform // the platform we are building for if needed - ] - result = flatten([ - for registry in get_container_registries() : - [for base_tag in get_base_tags() : - concat( - # If the base_tag contains latest, and the suffix contains `-alpine` add a `:alpine` tag too - base_tag == "latest" ? suffix == "-alpine" ? ["${registry}:alpine${platform}"] : [] : [], - # The default tagging strategy - ["${registry}:${base_tag}${suffix}${platform}"] - ) - ] - ]) -} - -function "image_index_annotations" { - params = [] - result = flatten([ - for key, value in labels() : - value != null ? formatlist("annotation-index.%s=%s", "${key}", "${value}") : [] - ]) -} diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh @@ -1,57 +0,0 @@ -#!/bin/sh - -# Use the value of the corresponding env var (if present), -# or a default value otherwise. -: "${DATA_FOLDER:="data"}" -: "${ROCKET_PORT:="80"}" - -CONFIG_FILE="${DATA_FOLDER}"/config.json - -# Given a config key, return the corresponding config value from the -# config file. If the key doesn't exist, return an empty string. -get_config_val() { - key="$1" - # Extract a line of the form: - # "domain": "https://bw.example.com/path", - grep "\"${key}\":" "${CONFIG_FILE}" | - # To extract just the value (https://bw.example.com/path), delete: - # (1) everything up to and including the first ':', - # (2) whitespace and '"' from the front, - # (3) ',' and '"' from the back. - sed -e 's/[^:]\+://' -e 's/^[ "]\+//' -e 's/[,"]\+$//' -} - -# Extract the base path from a domain URL. For example: -# - `` -> `` -# - `https://bw.example.com` -> `` -# - `https://bw.example.com/` -> `` -# - `https://bw.example.com/path` -> `/path` -# - `https://bw.example.com/multi/path` -> `/multi/path` -get_base_path() { - echo "$1" | - # Delete: - # (1) everything up to and including '://', - # (2) everything up to '/', - # (3) trailing '/' from the back. - sed -e 's|.*://||' -e 's|[^/]\+||' -e 's|/*$||' -} - -# Read domain URL from config.json, if present. -if [ -r "${CONFIG_FILE}" ]; then - domain="$(get_config_val 'domain')" - if [ -n "${domain}" ]; then - # config.json 'domain' overrides the DOMAIN env var. - DOMAIN="${domain}" - fi -fi - -addr="${ROCKET_ADDRESS}" -if [ -z "${addr}" ] || [ "${addr}" = '0.0.0.0' ] || [ "${addr}" = '::' ]; then - addr='localhost' -fi -base_path="$(get_base_path "${DOMAIN}")" -if [ -n "${ROCKET_TLS}" ]; then - s='s' -fi -curl --insecure --fail --silent --show-error \ - "http${s}://${addr}:${ROCKET_PORT}${base_path}/alive" || exit 1 diff --git a/docker/podman-bake.sh b/docker/podman-bake.sh @@ -1,105 +0,0 @@ -#!/usr/bin/env bash - -# Determine the basedir of this script. 
-# It should be located in the same directory as the docker-bake.hcl
-# This ensures you can run this script from both inside and outside of the docker directory
-BASEDIR=$(RL=$(readlink -n "$0"); SP="${RL:-$0}"; dirname "$(cd "$(dirname "${SP}")" || exit; pwd)/$(basename "${SP}")")
-
-# Load the build envs
-source "${BASEDIR}/bake_env.sh"
-
-# Check if a target is given as first argument
-# If not, we assume the defaults and pass the given arguments to the podman command
-case "${1}" in
-    alpine*|debian*)
-        TARGET="${1}"
-        # Now shift the $@ array so we only have the rest of the arguments
-        # This allows us to append these as extra arguments to the podman buildx build command
-        shift
-        ;;
-esac
-
-LABEL_ARGS=(
-    --label org.opencontainers.image.description="Unofficial Bitwarden compatible server written in Rust"
-    --label org.opencontainers.image.licenses="AGPL-3.0-only"
-    --label org.opencontainers.image.documentation="https://github.com/dani-garcia/vaultwarden/wiki"
-    --label org.opencontainers.image.url="https://github.com/dani-garcia/vaultwarden"
-    --label org.opencontainers.image.created="$(date --utc --iso-8601=seconds)"
-)
-if [[ -n "${SOURCE_REPOSITORY_URL}" ]]; then
-    LABEL_ARGS+=(--label org.opencontainers.image.source="${SOURCE_REPOSITORY_URL}")
-fi
-if [[ -n "${SOURCE_COMMIT}" ]]; then
-    LABEL_ARGS+=(--label org.opencontainers.image.revision="${SOURCE_COMMIT}")
-fi
-if [[ -n "${SOURCE_VERSION}" ]]; then
-    LABEL_ARGS+=(--label org.opencontainers.image.version="${SOURCE_VERSION}")
-fi
-
-# Check if and which --build-arg arguments we need to configure
-BUILD_ARGS=()
-if [[ -n "${DB}" ]]; then
-    BUILD_ARGS+=(--build-arg DB="${DB}")
-fi
-if [[ -n "${CARGO_PROFILE}" ]]; then
-    BUILD_ARGS+=(--build-arg CARGO_PROFILE="${CARGO_PROFILE}")
-fi
-if [[ -n "${VW_VERSION}" ]]; then
-    BUILD_ARGS+=(--build-arg VW_VERSION="${VW_VERSION}")
-fi
-
-# Set the default BASE_TAGS if none are provided
-if [[ -z "${BASE_TAGS}" ]]; then
-    BASE_TAGS="testing"
-fi
-
-# Set the default CONTAINER_REGISTRIES if none are provided
-if [[ -z "${CONTAINER_REGISTRIES}" ]]; then
-    CONTAINER_REGISTRIES="vaultwarden/server"
-fi
-
-# Check which Dockerfile we need to use; the default is debian
-case "${TARGET}" in
-    alpine*)
-        BASE_TAGS="${BASE_TAGS}-alpine"
-        DOCKERFILE="Dockerfile.alpine"
-        ;;
-    *)
-        DOCKERFILE="Dockerfile.debian"
-        ;;
-esac
-
-# Check which platform we need to build and append the BASE_TAGS with the architecture
-case "${TARGET}" in
-    *-arm64)
-        BASE_TAGS="${BASE_TAGS}-arm64"
-        PLATFORM="linux/arm64"
-        ;;
-    *-armv7)
-        BASE_TAGS="${BASE_TAGS}-armv7"
-        PLATFORM="linux/arm/v7"
-        ;;
-    *-armv6)
-        BASE_TAGS="${BASE_TAGS}-armv6"
-        PLATFORM="linux/arm/v6"
-        ;;
-    *)
-        BASE_TAGS="${BASE_TAGS}-amd64"
-        PLATFORM="linux/amd64"
-        ;;
-esac
-
-# Be verbose on what is being executed
-set -x
-
-# Build the image with podman
-# We use the docker format here since we are using `SHELL`, which is not supported by OCI
-# shellcheck disable=SC2086
-podman buildx build \
-    --platform="${PLATFORM}" \
-    --tag="${CONTAINER_REGISTRIES}:${BASE_TAGS}" \
-    --format=docker \
-    "${LABEL_ARGS[@]}" \
-    "${BUILD_ARGS[@]}" \
-    --file="${BASEDIR}/${DOCKERFILE}" "$@" \
-    "${BASEDIR}/.."
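
For illustration, assuming the default `BASE_TAGS` (`testing`) and `CONTAINER_REGISTRIES` (`vaultwarden/server`), running `./podman-bake.sh alpine-arm64` from the repo root resolves the case statements above to roughly the following invocation; the label and build-arg flags are omitted here, since they depend on the git state and environment.

```bash
# Rough sketch of the command podman-bake.sh assembles for `alpine-arm64`,
# assuming the default BASE_TAGS ("testing") and CONTAINER_REGISTRIES
# ("vaultwarden/server"); LABEL_ARGS and BUILD_ARGS are omitted.
podman buildx build \
    --platform="linux/arm64" \
    --tag="vaultwarden/server:testing-alpine-arm64" \
    --format=docker \
    --file="docker/Dockerfile.alpine" \
    .
```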
diff --git a/docker/render_template b/docker/render_template @@ -1,31 +0,0 @@ -#!/usr/bin/env python3 - -import os -import argparse -import json -import yaml -import jinja2 - -# Load settings file -with open("DockerSettings.yaml", 'r') as yaml_file: - yaml_data = yaml.safe_load(yaml_file) - -settings_env = jinja2.Environment( - loader=jinja2.FileSystemLoader(os.getcwd()), -) -settings_yaml = yaml.safe_load(settings_env.get_template("DockerSettings.yaml").render(yaml_data)) - -args_parser = argparse.ArgumentParser() -args_parser.add_argument('template_file', help='Jinja2 template file to render.') -args_parser.add_argument('render_vars', help='JSON-encoded data to pass to the templating engine.') -cli_args = args_parser.parse_args() - -# Merge the default config yaml with the json arguments given. -render_vars = json.loads(cli_args.render_vars) -settings_yaml.update(render_vars) - -environment = jinja2.Environment( - loader=jinja2.FileSystemLoader(os.getcwd()), - trim_blocks=True, -) -print(environment.get_template(cli_args.template_file).render(settings_yaml)) diff --git a/docker/start.sh b/docker/start.sh @@ -1,25 +0,0 @@ -#!/bin/sh - -if [ -r /etc/vaultwarden.sh ]; then - . /etc/vaultwarden.sh -elif [ -r /etc/bitwarden_rs.sh ]; then - echo "### You are using the old /etc/bitwarden_rs.sh script, please migrate to /etc/vaultwarden.sh ###" - . /etc/bitwarden_rs.sh -fi - -if [ -d /etc/vaultwarden.d ]; then - for f in /etc/vaultwarden.d/*.sh; do - if [ -r "${f}" ]; then - . "${f}" - fi - done -elif [ -d /etc/bitwarden_rs.d ]; then - echo "### You are using the old /etc/bitwarden_rs.d script directory, please migrate to /etc/vaultwarden.d ###" - for f in /etc/bitwarden_rs.d/*.sh; do - if [ -r "${f}" ]; then - . "${f}" - fi - done -fi - -exec /vaultwarden "${@}" diff --git a/migrations/mysql/2018-01-14-171611_create_tables/down.sql b/migrations/mysql/2018-01-14-171611_create_tables/down.sql @@ -1,9 +0,0 @@ -DROP TABLE users; - -DROP TABLE devices; - -DROP TABLE ciphers; - -DROP TABLE attachments; - -DROP TABLE folders; -\ No newline at end of file diff --git a/migrations/mysql/2018-01-14-171611_create_tables/up.sql b/migrations/mysql/2018-01-14-171611_create_tables/up.sql @@ -1,62 +0,0 @@ -CREATE TABLE users ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - created_at DATETIME NOT NULL, - updated_at DATETIME NOT NULL, - email VARCHAR(255) NOT NULL UNIQUE, - name TEXT NOT NULL, - password_hash BLOB NOT NULL, - salt BLOB NOT NULL, - password_iterations INTEGER NOT NULL, - password_hint TEXT, - `key` TEXT NOT NULL, - private_key TEXT, - public_key TEXT, - totp_secret TEXT, - totp_recover TEXT, - security_stamp TEXT NOT NULL, - equivalent_domains TEXT NOT NULL, - excluded_globals TEXT NOT NULL -); - -CREATE TABLE devices ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - created_at DATETIME NOT NULL, - updated_at DATETIME NOT NULL, - user_uuid CHAR(36) NOT NULL REFERENCES users (uuid), - name TEXT NOT NULL, - type INTEGER NOT NULL, - push_token TEXT, - refresh_token TEXT NOT NULL -); - -CREATE TABLE ciphers ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - created_at DATETIME NOT NULL, - updated_at DATETIME NOT NULL, - user_uuid CHAR(36) NOT NULL REFERENCES users (uuid), - folder_uuid CHAR(36) REFERENCES folders (uuid), - organization_uuid CHAR(36), - type INTEGER NOT NULL, - name TEXT NOT NULL, - notes TEXT, - fields TEXT, - data TEXT NOT NULL, - favorite BOOLEAN NOT NULL -); - -CREATE TABLE attachments ( - id CHAR(36) NOT NULL PRIMARY KEY, - cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid), - file_name TEXT NOT 
NULL, - file_size INTEGER NOT NULL - -); - -CREATE TABLE folders ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - created_at DATETIME NOT NULL, - updated_at DATETIME NOT NULL, - user_uuid CHAR(36) NOT NULL REFERENCES users (uuid), - name TEXT NOT NULL -); - diff --git a/migrations/mysql/2018-02-17-205753_create_collections_and_orgs/down.sql b/migrations/mysql/2018-02-17-205753_create_collections_and_orgs/down.sql @@ -1,8 +0,0 @@ -DROP TABLE collections; - -DROP TABLE organizations; - - -DROP TABLE users_collections; - -DROP TABLE users_organizations; diff --git a/migrations/mysql/2018-02-17-205753_create_collections_and_orgs/up.sql b/migrations/mysql/2018-02-17-205753_create_collections_and_orgs/up.sql @@ -1,30 +0,0 @@ -CREATE TABLE collections ( - uuid VARCHAR(40) NOT NULL PRIMARY KEY, - org_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid), - name TEXT NOT NULL -); - -CREATE TABLE organizations ( - uuid VARCHAR(40) NOT NULL PRIMARY KEY, - name TEXT NOT NULL, - billing_email TEXT NOT NULL -); - -CREATE TABLE users_collections ( - user_uuid CHAR(36) NOT NULL REFERENCES users (uuid), - collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid), - PRIMARY KEY (user_uuid, collection_uuid) -); - -CREATE TABLE users_organizations ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - user_uuid CHAR(36) NOT NULL REFERENCES users (uuid), - org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid), - - access_all BOOLEAN NOT NULL, - `key` TEXT NOT NULL, - status INTEGER NOT NULL, - type INTEGER NOT NULL, - - UNIQUE (user_uuid, org_uuid) -); diff --git a/migrations/mysql/2018-04-27-155151_create_users_ciphers/down.sql b/migrations/mysql/2018-04-27-155151_create_users_ciphers/down.sql diff --git a/migrations/mysql/2018-04-27-155151_create_users_ciphers/up.sql b/migrations/mysql/2018-04-27-155151_create_users_ciphers/up.sql @@ -1,34 +0,0 @@ -ALTER TABLE ciphers RENAME TO oldCiphers; - -CREATE TABLE ciphers ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - created_at DATETIME NOT NULL, - updated_at DATETIME NOT NULL, - user_uuid CHAR(36) REFERENCES users (uuid), -- Make this optional - organization_uuid CHAR(36) REFERENCES organizations (uuid), -- Add reference to orgs table - -- Remove folder_uuid - type INTEGER NOT NULL, - name TEXT NOT NULL, - notes TEXT, - fields TEXT, - data TEXT NOT NULL, - favorite BOOLEAN NOT NULL -); - -CREATE TABLE folders_ciphers ( - cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid), - folder_uuid CHAR(36) NOT NULL REFERENCES folders (uuid), - - PRIMARY KEY (cipher_uuid, folder_uuid) -); - -INSERT INTO ciphers (uuid, created_at, updated_at, user_uuid, organization_uuid, type, name, notes, fields, data, favorite) -SELECT uuid, created_at, updated_at, user_uuid, organization_uuid, type, name, notes, fields, data, favorite FROM oldCiphers; - -INSERT INTO folders_ciphers (cipher_uuid, folder_uuid) -SELECT uuid, folder_uuid FROM oldCiphers WHERE folder_uuid IS NOT NULL; - - -DROP TABLE oldCiphers; - -ALTER TABLE users_collections ADD COLUMN read_only BOOLEAN NOT NULL DEFAULT 0; -- False diff --git a/migrations/mysql/2018-05-08-161616_create_collection_cipher_map/down.sql b/migrations/mysql/2018-05-08-161616_create_collection_cipher_map/down.sql @@ -1 +0,0 @@ -DROP TABLE ciphers_collections; -\ No newline at end of file diff --git a/migrations/mysql/2018-05-08-161616_create_collection_cipher_map/up.sql b/migrations/mysql/2018-05-08-161616_create_collection_cipher_map/up.sql @@ -1,5 +0,0 @@ -CREATE TABLE ciphers_collections ( - cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid), - 
collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid), - PRIMARY KEY (cipher_uuid, collection_uuid) -); diff --git a/migrations/mysql/2018-05-25-232323_update_attachments_reference/down.sql b/migrations/mysql/2018-05-25-232323_update_attachments_reference/down.sql diff --git a/migrations/mysql/2018-05-25-232323_update_attachments_reference/up.sql b/migrations/mysql/2018-05-25-232323_update_attachments_reference/up.sql @@ -1,14 +0,0 @@ -ALTER TABLE attachments RENAME TO oldAttachments; - -CREATE TABLE attachments ( - id CHAR(36) NOT NULL PRIMARY KEY, - cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid), - file_name TEXT NOT NULL, - file_size INTEGER NOT NULL - -); - -INSERT INTO attachments (id, cipher_uuid, file_name, file_size) -SELECT id, cipher_uuid, file_name, file_size FROM oldAttachments; - -DROP TABLE oldAttachments; diff --git a/migrations/mysql/2018-06-01-112529_update_devices_twofactor_remember/down.sql b/migrations/mysql/2018-06-01-112529_update_devices_twofactor_remember/down.sql @@ -1 +0,0 @@ --- This file should undo anything in `up.sql` -\ No newline at end of file diff --git a/migrations/mysql/2018-06-01-112529_update_devices_twofactor_remember/up.sql b/migrations/mysql/2018-06-01-112529_update_devices_twofactor_remember/up.sql @@ -1,3 +0,0 @@ -ALTER TABLE devices - ADD COLUMN - twofactor_remember TEXT; -\ No newline at end of file diff --git a/migrations/mysql/2018-07-11-181453_create_u2f_twofactor/down.sql b/migrations/mysql/2018-07-11-181453_create_u2f_twofactor/down.sql @@ -1,8 +0,0 @@ -UPDATE users -SET totp_secret = ( - SELECT twofactor.data FROM twofactor - WHERE twofactor.type = 0 - AND twofactor.user_uuid = users.uuid -); - -DROP TABLE twofactor; -\ No newline at end of file diff --git a/migrations/mysql/2018-07-11-181453_create_u2f_twofactor/up.sql b/migrations/mysql/2018-07-11-181453_create_u2f_twofactor/up.sql @@ -1,15 +0,0 @@ -CREATE TABLE twofactor ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - user_uuid CHAR(36) NOT NULL REFERENCES users (uuid), - type INTEGER NOT NULL, - enabled BOOLEAN NOT NULL, - data TEXT NOT NULL, - - UNIQUE (user_uuid, type) -); - - -INSERT INTO twofactor (uuid, user_uuid, type, enabled, data) -SELECT UUID(), uuid, 0, 1, u.totp_secret FROM users u where u.totp_secret IS NOT NULL; - -UPDATE users SET totp_secret = NULL; -- Instead of recreating the table, just leave the columns empty diff --git a/migrations/mysql/2018-08-27-172114_update_ciphers/down.sql b/migrations/mysql/2018-08-27-172114_update_ciphers/down.sql diff --git a/migrations/mysql/2018-08-27-172114_update_ciphers/up.sql b/migrations/mysql/2018-08-27-172114_update_ciphers/up.sql @@ -1,3 +0,0 @@ -ALTER TABLE ciphers - ADD COLUMN - password_history TEXT; -\ No newline at end of file diff --git a/migrations/mysql/2018-09-10-111213_add_invites/down.sql b/migrations/mysql/2018-09-10-111213_add_invites/down.sql @@ -1 +0,0 @@ -DROP TABLE invitations; -\ No newline at end of file diff --git a/migrations/mysql/2018-09-10-111213_add_invites/up.sql b/migrations/mysql/2018-09-10-111213_add_invites/up.sql @@ -1,3 +0,0 @@ -CREATE TABLE invitations ( - email VARCHAR(255) NOT NULL PRIMARY KEY -); diff --git a/migrations/mysql/2018-09-19-144557_add_kdf_columns/down.sql b/migrations/mysql/2018-09-19-144557_add_kdf_columns/down.sql diff --git a/migrations/mysql/2018-09-19-144557_add_kdf_columns/up.sql b/migrations/mysql/2018-09-19-144557_add_kdf_columns/up.sql @@ -1,7 +0,0 @@ -ALTER TABLE users - ADD COLUMN - client_kdf_type INTEGER NOT NULL DEFAULT 0; -- PBKDF2 - -ALTER TABLE users - 
ADD COLUMN - client_kdf_iter INTEGER NOT NULL DEFAULT 100000; diff --git a/migrations/mysql/2018-11-27-152651_add_att_key_columns/down.sql b/migrations/mysql/2018-11-27-152651_add_att_key_columns/down.sql diff --git a/migrations/mysql/2018-11-27-152651_add_att_key_columns/up.sql b/migrations/mysql/2018-11-27-152651_add_att_key_columns/up.sql @@ -1,3 +0,0 @@ -ALTER TABLE attachments - ADD COLUMN - `key` TEXT; diff --git a/migrations/mysql/2019-05-26-216651_rename_key_and_type_columns/down.sql b/migrations/mysql/2019-05-26-216651_rename_key_and_type_columns/down.sql @@ -1,7 +0,0 @@ -ALTER TABLE attachments CHANGE COLUMN akey `key` TEXT; -ALTER TABLE ciphers CHANGE COLUMN atype type INTEGER NOT NULL; -ALTER TABLE devices CHANGE COLUMN atype type INTEGER NOT NULL; -ALTER TABLE twofactor CHANGE COLUMN atype type INTEGER NOT NULL; -ALTER TABLE users CHANGE COLUMN akey `key` TEXT; -ALTER TABLE users_organizations CHANGE COLUMN akey `key` TEXT; -ALTER TABLE users_organizations CHANGE COLUMN atype type INTEGER NOT NULL; -\ No newline at end of file diff --git a/migrations/mysql/2019-05-26-216651_rename_key_and_type_columns/up.sql b/migrations/mysql/2019-05-26-216651_rename_key_and_type_columns/up.sql @@ -1,7 +0,0 @@ -ALTER TABLE attachments CHANGE COLUMN `key` akey TEXT; -ALTER TABLE ciphers CHANGE COLUMN type atype INTEGER NOT NULL; -ALTER TABLE devices CHANGE COLUMN type atype INTEGER NOT NULL; -ALTER TABLE twofactor CHANGE COLUMN type atype INTEGER NOT NULL; -ALTER TABLE users CHANGE COLUMN `key` akey TEXT; -ALTER TABLE users_organizations CHANGE COLUMN `key` akey TEXT; -ALTER TABLE users_organizations CHANGE COLUMN type atype INTEGER NOT NULL; -\ No newline at end of file diff --git a/migrations/mysql/2019-10-10-083032_add_column_to_twofactor/down.sql b/migrations/mysql/2019-10-10-083032_add_column_to_twofactor/down.sql diff --git a/migrations/mysql/2019-10-10-083032_add_column_to_twofactor/up.sql b/migrations/mysql/2019-10-10-083032_add_column_to_twofactor/up.sql @@ -1 +0,0 @@ -ALTER TABLE twofactor ADD COLUMN last_used INTEGER NOT NULL DEFAULT 0; -\ No newline at end of file diff --git a/migrations/mysql/2019-11-17-011009_add_email_verification/down.sql b/migrations/mysql/2019-11-17-011009_add_email_verification/down.sql @@ -1 +0,0 @@ - diff --git a/migrations/mysql/2019-11-17-011009_add_email_verification/up.sql b/migrations/mysql/2019-11-17-011009_add_email_verification/up.sql @@ -1,5 +0,0 @@ -ALTER TABLE users ADD COLUMN verified_at DATETIME DEFAULT NULL; -ALTER TABLE users ADD COLUMN last_verifying_at DATETIME DEFAULT NULL; -ALTER TABLE users ADD COLUMN login_verify_count INTEGER NOT NULL DEFAULT 0; -ALTER TABLE users ADD COLUMN email_new VARCHAR(255) DEFAULT NULL; -ALTER TABLE users ADD COLUMN email_new_token VARCHAR(16) DEFAULT NULL; diff --git a/migrations/mysql/2020-03-13-205045_add_policy_table/down.sql b/migrations/mysql/2020-03-13-205045_add_policy_table/down.sql @@ -1 +0,0 @@ -DROP TABLE org_policies; diff --git a/migrations/mysql/2020-03-13-205045_add_policy_table/up.sql b/migrations/mysql/2020-03-13-205045_add_policy_table/up.sql @@ -1,9 +0,0 @@ -CREATE TABLE org_policies ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid), - atype INTEGER NOT NULL, - enabled BOOLEAN NOT NULL, - data TEXT NOT NULL, - - UNIQUE (org_uuid, atype) -); diff --git a/migrations/mysql/2020-04-09-235005_add_cipher_delete_date/down.sql b/migrations/mysql/2020-04-09-235005_add_cipher_delete_date/down.sql @@ -1 +0,0 @@ - diff --git 
a/migrations/mysql/2020-04-09-235005_add_cipher_delete_date/up.sql b/migrations/mysql/2020-04-09-235005_add_cipher_delete_date/up.sql @@ -1,3 +0,0 @@ -ALTER TABLE ciphers - ADD COLUMN - deleted_at DATETIME; diff --git a/migrations/mysql/2020-07-01-214531_add_hide_passwords/down.sql b/migrations/mysql/2020-07-01-214531_add_hide_passwords/down.sql diff --git a/migrations/mysql/2020-07-01-214531_add_hide_passwords/up.sql b/migrations/mysql/2020-07-01-214531_add_hide_passwords/up.sql @@ -1,2 +0,0 @@ -ALTER TABLE users_collections -ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/migrations/mysql/2020-08-02-025025_add_favorites_table/down.sql b/migrations/mysql/2020-08-02-025025_add_favorites_table/down.sql @@ -1,13 +0,0 @@ -ALTER TABLE ciphers -ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE; - --- Transfer favorite status for user-owned ciphers. -UPDATE ciphers -SET favorite = TRUE -WHERE EXISTS ( - SELECT * FROM favorites - WHERE favorites.user_uuid = ciphers.user_uuid - AND favorites.cipher_uuid = ciphers.uuid -); - -DROP TABLE favorites; diff --git a/migrations/mysql/2020-08-02-025025_add_favorites_table/up.sql b/migrations/mysql/2020-08-02-025025_add_favorites_table/up.sql @@ -1,16 +0,0 @@ -CREATE TABLE favorites ( - user_uuid CHAR(36) NOT NULL REFERENCES users(uuid), - cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers(uuid), - - PRIMARY KEY (user_uuid, cipher_uuid) -); - --- Transfer favorite status for user-owned ciphers. -INSERT INTO favorites(user_uuid, cipher_uuid) -SELECT user_uuid, uuid -FROM ciphers -WHERE favorite = TRUE - AND user_uuid IS NOT NULL; - -ALTER TABLE ciphers -DROP COLUMN favorite; diff --git a/migrations/mysql/2020-11-30-224000_add_user_enabled/down.sql b/migrations/mysql/2020-11-30-224000_add_user_enabled/down.sql diff --git a/migrations/mysql/2020-11-30-224000_add_user_enabled/up.sql b/migrations/mysql/2020-11-30-224000_add_user_enabled/up.sql @@ -1 +0,0 @@ -ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT 1; diff --git a/migrations/mysql/2020-12-09-173101_add_stamp_exception/down.sql b/migrations/mysql/2020-12-09-173101_add_stamp_exception/down.sql diff --git a/migrations/mysql/2020-12-09-173101_add_stamp_exception/up.sql b/migrations/mysql/2020-12-09-173101_add_stamp_exception/up.sql @@ -1 +0,0 @@ -ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL; -\ No newline at end of file diff --git a/migrations/mysql/2021-03-11-190243_add_sends/down.sql b/migrations/mysql/2021-03-11-190243_add_sends/down.sql @@ -1 +0,0 @@ -DROP TABLE sends; diff --git a/migrations/mysql/2021-03-11-190243_add_sends/up.sql b/migrations/mysql/2021-03-11-190243_add_sends/up.sql @@ -1,25 +0,0 @@ -CREATE TABLE sends ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - user_uuid CHAR(36) REFERENCES users (uuid), - organization_uuid CHAR(36) REFERENCES organizations (uuid), - - name TEXT NOT NULL, - notes TEXT, - - atype INTEGER NOT NULL, - data TEXT NOT NULL, - akey TEXT NOT NULL, - password_hash BLOB, - password_salt BLOB, - password_iter INTEGER, - - max_access_count INTEGER, - access_count INTEGER NOT NULL, - - creation_date DATETIME NOT NULL, - revision_date DATETIME NOT NULL, - expiration_date DATETIME, - deletion_date DATETIME NOT NULL, - - disabled BOOLEAN NOT NULL -); -\ No newline at end of file diff --git a/migrations/mysql/2021-04-30-233251_add_reprompt/down.sql b/migrations/mysql/2021-04-30-233251_add_reprompt/down.sql diff --git a/migrations/mysql/2021-04-30-233251_add_reprompt/up.sql b/migrations/mysql/2021-04-30-233251_add_reprompt/up.sql @@ 
-1,2 +0,0 @@ -ALTER TABLE ciphers -ADD COLUMN reprompt INTEGER; diff --git a/migrations/mysql/2021-05-11-205202_add_hide_email/down.sql b/migrations/mysql/2021-05-11-205202_add_hide_email/down.sql diff --git a/migrations/mysql/2021-05-11-205202_add_hide_email/up.sql b/migrations/mysql/2021-05-11-205202_add_hide_email/up.sql @@ -1,2 +0,0 @@ -ALTER TABLE sends -ADD COLUMN hide_email BOOLEAN; diff --git a/migrations/mysql/2021-07-01-203140_add_password_reset_keys/down.sql b/migrations/mysql/2021-07-01-203140_add_password_reset_keys/down.sql diff --git a/migrations/mysql/2021-07-01-203140_add_password_reset_keys/up.sql b/migrations/mysql/2021-07-01-203140_add_password_reset_keys/up.sql @@ -1,5 +0,0 @@ -ALTER TABLE organizations - ADD COLUMN private_key TEXT; - -ALTER TABLE organizations - ADD COLUMN public_key TEXT; diff --git a/migrations/mysql/2021-08-30-193501_create_emergency_access/down.sql b/migrations/mysql/2021-08-30-193501_create_emergency_access/down.sql @@ -1 +0,0 @@ -DROP TABLE emergency_access; diff --git a/migrations/mysql/2021-08-30-193501_create_emergency_access/up.sql b/migrations/mysql/2021-08-30-193501_create_emergency_access/up.sql @@ -1,14 +0,0 @@ -CREATE TABLE emergency_access ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - grantor_uuid CHAR(36) REFERENCES users (uuid), - grantee_uuid CHAR(36) REFERENCES users (uuid), - email VARCHAR(255), - key_encrypted TEXT, - atype INTEGER NOT NULL, - status INTEGER NOT NULL, - wait_time_days INTEGER NOT NULL, - recovery_initiated_at DATETIME, - last_notification_at DATETIME, - updated_at DATETIME NOT NULL, - created_at DATETIME NOT NULL -); diff --git a/migrations/mysql/2021-10-24-164321_add_2fa_incomplete/down.sql b/migrations/mysql/2021-10-24-164321_add_2fa_incomplete/down.sql @@ -1 +0,0 @@ -DROP TABLE twofactor_incomplete; diff --git a/migrations/mysql/2021-10-24-164321_add_2fa_incomplete/up.sql b/migrations/mysql/2021-10-24-164321_add_2fa_incomplete/up.sql @@ -1,9 +0,0 @@ -CREATE TABLE twofactor_incomplete ( - user_uuid CHAR(36) NOT NULL REFERENCES users(uuid), - device_uuid CHAR(36) NOT NULL, - device_name TEXT NOT NULL, - login_time DATETIME NOT NULL, - ip_address TEXT NOT NULL, - - PRIMARY KEY (user_uuid, device_uuid) -); diff --git a/migrations/mysql/2022-01-17-234911_add_api_key/down.sql b/migrations/mysql/2022-01-17-234911_add_api_key/down.sql diff --git a/migrations/mysql/2022-01-17-234911_add_api_key/up.sql b/migrations/mysql/2022-01-17-234911_add_api_key/up.sql @@ -1,2 +0,0 @@ -ALTER TABLE users -ADD COLUMN api_key VARCHAR(255); diff --git a/migrations/mysql/2022-03-02-210038_update_devices_primary_key/down.sql b/migrations/mysql/2022-03-02-210038_update_devices_primary_key/down.sql diff --git a/migrations/mysql/2022-03-02-210038_update_devices_primary_key/up.sql b/migrations/mysql/2022-03-02-210038_update_devices_primary_key/up.sql @@ -1,4 +0,0 @@ --- First remove the previous primary key -ALTER TABLE devices DROP PRIMARY KEY; --- Add a new combined one -ALTER TABLE devices ADD PRIMARY KEY (uuid, user_uuid); diff --git a/migrations/mysql/2022-07-27-110000_add_group_support/down.sql b/migrations/mysql/2022-07-27-110000_add_group_support/down.sql @@ -1,3 +0,0 @@ -DROP TABLE `groups`; -DROP TABLE groups_users; -DROP TABLE collections_groups; -\ No newline at end of file diff --git a/migrations/mysql/2022-07-27-110000_add_group_support/up.sql b/migrations/mysql/2022-07-27-110000_add_group_support/up.sql @@ -1,23 +0,0 @@ -CREATE TABLE `groups` ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - organizations_uuid VARCHAR(40) NOT NULL 
REFERENCES organizations (uuid), - name VARCHAR(100) NOT NULL, - access_all BOOLEAN NOT NULL, - external_id VARCHAR(300) NULL, - creation_date DATETIME NOT NULL, - revision_date DATETIME NOT NULL -); - -CREATE TABLE groups_users ( - groups_uuid CHAR(36) NOT NULL REFERENCES `groups` (uuid), - users_organizations_uuid VARCHAR(36) NOT NULL REFERENCES users_organizations (uuid), - UNIQUE (groups_uuid, users_organizations_uuid) -); - -CREATE TABLE collections_groups ( - collections_uuid VARCHAR(40) NOT NULL REFERENCES collections (uuid), - groups_uuid CHAR(36) NOT NULL REFERENCES `groups` (uuid), - read_only BOOLEAN NOT NULL, - hide_passwords BOOLEAN NOT NULL, - UNIQUE (collections_uuid, groups_uuid) -); -\ No newline at end of file diff --git a/migrations/mysql/2022-10-18-170602_add_events/down.sql b/migrations/mysql/2022-10-18-170602_add_events/down.sql @@ -1 +0,0 @@ -DROP TABLE event; diff --git a/migrations/mysql/2022-10-18-170602_add_events/up.sql b/migrations/mysql/2022-10-18-170602_add_events/up.sql @@ -1,19 +0,0 @@ -CREATE TABLE event ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - event_type INTEGER NOT NULL, - user_uuid CHAR(36), - org_uuid CHAR(36), - cipher_uuid CHAR(36), - collection_uuid CHAR(36), - group_uuid CHAR(36), - org_user_uuid CHAR(36), - act_user_uuid CHAR(36), - device_type INTEGER, - ip_address TEXT, - event_date DATETIME NOT NULL, - policy_uuid CHAR(36), - provider_uuid CHAR(36), - provider_user_uuid CHAR(36), - provider_org_uuid CHAR(36), - UNIQUE (uuid) -); diff --git a/migrations/mysql/2023-01-06-151600_add_reset_password_support/down.sql b/migrations/mysql/2023-01-06-151600_add_reset_password_support/down.sql diff --git a/migrations/mysql/2023-01-06-151600_add_reset_password_support/up.sql b/migrations/mysql/2023-01-06-151600_add_reset_password_support/up.sql @@ -1,2 +0,0 @@ -ALTER TABLE users_organizations -ADD COLUMN reset_password_key TEXT; diff --git a/migrations/mysql/2023-01-11-205851_add_avatar_color/down.sql b/migrations/mysql/2023-01-11-205851_add_avatar_color/down.sql diff --git a/migrations/mysql/2023-01-11-205851_add_avatar_color/up.sql b/migrations/mysql/2023-01-11-205851_add_avatar_color/up.sql @@ -1,2 +0,0 @@ -ALTER TABLE users -ADD COLUMN avatar_color VARCHAR(7); diff --git a/migrations/mysql/2023-01-31-222222_add_argon2/down.sql b/migrations/mysql/2023-01-31-222222_add_argon2/down.sql diff --git a/migrations/mysql/2023-01-31-222222_add_argon2/up.sql b/migrations/mysql/2023-01-31-222222_add_argon2/up.sql @@ -1,7 +0,0 @@ -ALTER TABLE users - ADD COLUMN - client_kdf_memory INTEGER DEFAULT NULL; - -ALTER TABLE users - ADD COLUMN - client_kdf_parallelism INTEGER DEFAULT NULL; diff --git a/migrations/mysql/2023-02-18-125735_push_uuid_table/down.sql b/migrations/mysql/2023-02-18-125735_push_uuid_table/down.sql diff --git a/migrations/mysql/2023-02-18-125735_push_uuid_table/up.sql b/migrations/mysql/2023-02-18-125735_push_uuid_table/up.sql @@ -1 +0,0 @@ -ALTER TABLE devices ADD COLUMN push_uuid TEXT; -\ No newline at end of file diff --git a/migrations/mysql/2023-06-02-200424_create_organization_api_key/down.sql b/migrations/mysql/2023-06-02-200424_create_organization_api_key/down.sql diff --git a/migrations/mysql/2023-06-02-200424_create_organization_api_key/up.sql b/migrations/mysql/2023-06-02-200424_create_organization_api_key/up.sql @@ -1,10 +0,0 @@ -CREATE TABLE organization_api_key ( - uuid CHAR(36) NOT NULL, - org_uuid CHAR(36) NOT NULL REFERENCES organizations(uuid), - atype INTEGER NOT NULL, - api_key VARCHAR(255) NOT NULL, - revision_date DATETIME NOT 
NULL, - PRIMARY KEY(uuid, org_uuid) -); - -ALTER TABLE users ADD COLUMN external_id TEXT; diff --git a/migrations/mysql/2023-06-17-200424_create_auth_requests_table/down.sql b/migrations/mysql/2023-06-17-200424_create_auth_requests_table/down.sql diff --git a/migrations/mysql/2023-06-17-200424_create_auth_requests_table/up.sql b/migrations/mysql/2023-06-17-200424_create_auth_requests_table/up.sql @@ -1,19 +0,0 @@ -CREATE TABLE auth_requests ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - user_uuid CHAR(36) NOT NULL, - organization_uuid CHAR(36), - request_device_identifier CHAR(36) NOT NULL, - device_type INTEGER NOT NULL, - request_ip TEXT NOT NULL, - response_device_id CHAR(36), - access_code TEXT NOT NULL, - public_key TEXT NOT NULL, - enc_key TEXT NOT NULL, - master_password_hash TEXT NOT NULL, - approved BOOLEAN, - creation_date DATETIME NOT NULL, - response_date DATETIME, - authentication_date DATETIME, - FOREIGN KEY(user_uuid) REFERENCES users(uuid), - FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid) -); -\ No newline at end of file diff --git a/migrations/mysql/2023-06-28-133700_add_collection_external_id/down.sql b/migrations/mysql/2023-06-28-133700_add_collection_external_id/down.sql diff --git a/migrations/mysql/2023-06-28-133700_add_collection_external_id/up.sql b/migrations/mysql/2023-06-28-133700_add_collection_external_id/up.sql @@ -1 +0,0 @@ -ALTER TABLE collections ADD COLUMN external_id TEXT; diff --git a/migrations/mysql/2023-09-01-170620_update_auth_request_table/down.sql b/migrations/mysql/2023-09-01-170620_update_auth_request_table/down.sql diff --git a/migrations/mysql/2023-09-01-170620_update_auth_request_table/up.sql b/migrations/mysql/2023-09-01-170620_update_auth_request_table/up.sql @@ -1,5 +0,0 @@ -ALTER TABLE auth_requests -MODIFY master_password_hash TEXT; - -ALTER TABLE auth_requests -MODIFY enc_key TEXT; diff --git a/migrations/mysql/2023-09-02-212336_move_user_external_id/down.sql b/migrations/mysql/2023-09-02-212336_move_user_external_id/down.sql diff --git a/migrations/mysql/2023-09-02-212336_move_user_external_id/up.sql b/migrations/mysql/2023-09-02-212336_move_user_external_id/up.sql @@ -1,2 +0,0 @@ -ALTER TABLE users_organizations -ADD COLUMN external_id TEXT; diff --git a/migrations/mysql/2023-10-21-221242_add_cipher_key/down.sql b/migrations/mysql/2023-10-21-221242_add_cipher_key/down.sql diff --git a/migrations/mysql/2023-10-21-221242_add_cipher_key/up.sql b/migrations/mysql/2023-10-21-221242_add_cipher_key/up.sql @@ -1,2 +0,0 @@ -ALTER TABLE ciphers -ADD COLUMN `key` TEXT; diff --git a/migrations/postgresql/2019-09-12-100000_create_tables/down.sql b/migrations/postgresql/2019-09-12-100000_create_tables/down.sql @@ -1,13 +0,0 @@ -DROP TABLE devices; -DROP TABLE attachments; -DROP TABLE users_collections; -DROP TABLE users_organizations; -DROP TABLE folders_ciphers; -DROP TABLE ciphers_collections; -DROP TABLE twofactor; -DROP TABLE invitations; -DROP TABLE collections; -DROP TABLE folders; -DROP TABLE ciphers; -DROP TABLE users; -DROP TABLE organizations; diff --git a/migrations/postgresql/2019-09-12-100000_create_tables/up.sql b/migrations/postgresql/2019-09-12-100000_create_tables/up.sql @@ -1,121 +0,0 @@ -CREATE TABLE users ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - email VARCHAR(255) NOT NULL UNIQUE, - name TEXT NOT NULL, - password_hash BYTEA NOT NULL, - salt BYTEA NOT NULL, - password_iterations INTEGER NOT NULL, - password_hint TEXT, - akey TEXT NOT NULL, - private_key 
TEXT, - public_key TEXT, - totp_secret TEXT, - totp_recover TEXT, - security_stamp TEXT NOT NULL, - equivalent_domains TEXT NOT NULL, - excluded_globals TEXT NOT NULL, - client_kdf_type INTEGER NOT NULL DEFAULT 0, - client_kdf_iter INTEGER NOT NULL DEFAULT 100000 -); - -CREATE TABLE devices ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - user_uuid CHAR(36) NOT NULL REFERENCES users (uuid), - name TEXT NOT NULL, - atype INTEGER NOT NULL, - push_token TEXT, - refresh_token TEXT NOT NULL, - twofactor_remember TEXT -); - -CREATE TABLE organizations ( - uuid VARCHAR(40) NOT NULL PRIMARY KEY, - name TEXT NOT NULL, - billing_email TEXT NOT NULL -); - -CREATE TABLE ciphers ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - user_uuid CHAR(36) REFERENCES users (uuid), - organization_uuid CHAR(36) REFERENCES organizations (uuid), - atype INTEGER NOT NULL, - name TEXT NOT NULL, - notes TEXT, - fields TEXT, - data TEXT NOT NULL, - favorite BOOLEAN NOT NULL, - password_history TEXT -); - -CREATE TABLE attachments ( - id CHAR(36) NOT NULL PRIMARY KEY, - cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid), - file_name TEXT NOT NULL, - file_size INTEGER NOT NULL, - akey TEXT -); - -CREATE TABLE folders ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - user_uuid CHAR(36) NOT NULL REFERENCES users (uuid), - name TEXT NOT NULL -); - -CREATE TABLE collections ( - uuid VARCHAR(40) NOT NULL PRIMARY KEY, - org_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid), - name TEXT NOT NULL -); - -CREATE TABLE users_collections ( - user_uuid CHAR(36) NOT NULL REFERENCES users (uuid), - collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid), - read_only BOOLEAN NOT NULL DEFAULT false, - PRIMARY KEY (user_uuid, collection_uuid) -); - -CREATE TABLE users_organizations ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - user_uuid CHAR(36) NOT NULL REFERENCES users (uuid), - org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid), - - access_all BOOLEAN NOT NULL, - akey TEXT NOT NULL, - status INTEGER NOT NULL, - atype INTEGER NOT NULL, - - UNIQUE (user_uuid, org_uuid) -); - -CREATE TABLE folders_ciphers ( - cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid), - folder_uuid CHAR(36) NOT NULL REFERENCES folders (uuid), - PRIMARY KEY (cipher_uuid, folder_uuid) -); - -CREATE TABLE ciphers_collections ( - cipher_uuid CHAR(36) NOT NULL REFERENCES ciphers (uuid), - collection_uuid CHAR(36) NOT NULL REFERENCES collections (uuid), - PRIMARY KEY (cipher_uuid, collection_uuid) -); - -CREATE TABLE twofactor ( - uuid CHAR(36) NOT NULL PRIMARY KEY, - user_uuid CHAR(36) NOT NULL REFERENCES users (uuid), - atype INTEGER NOT NULL, - enabled BOOLEAN NOT NULL, - data TEXT NOT NULL, - UNIQUE (user_uuid, atype) -); - -CREATE TABLE invitations ( - email VARCHAR(255) NOT NULL PRIMARY KEY -); -\ No newline at end of file diff --git a/migrations/postgresql/2019-09-16-150000_fix_attachments/down.sql b/migrations/postgresql/2019-09-16-150000_fix_attachments/down.sql @@ -1,26 +0,0 @@ -ALTER TABLE attachments ALTER COLUMN id TYPE CHAR(36); -ALTER TABLE attachments ALTER COLUMN cipher_uuid TYPE CHAR(36); -ALTER TABLE users ALTER COLUMN uuid TYPE CHAR(36); -ALTER TABLE users ALTER COLUMN email TYPE VARCHAR(255); -ALTER TABLE devices ALTER COLUMN uuid TYPE CHAR(36); -ALTER TABLE devices ALTER COLUMN user_uuid TYPE CHAR(36); -ALTER TABLE organizations ALTER COLUMN uuid 
TYPE CHAR(40); -ALTER TABLE ciphers ALTER COLUMN uuid TYPE CHAR(36); -ALTER TABLE ciphers ALTER COLUMN user_uuid TYPE CHAR(36); -ALTER TABLE ciphers ALTER COLUMN organization_uuid TYPE CHAR(36); -ALTER TABLE folders ALTER COLUMN uuid TYPE CHAR(36); -ALTER TABLE folders ALTER COLUMN user_uuid TYPE CHAR(36); -ALTER TABLE collections ALTER COLUMN uuid TYPE CHAR(40); -ALTER TABLE collections ALTER COLUMN org_uuid TYPE CHAR(40); -ALTER TABLE users_collections ALTER COLUMN user_uuid TYPE CHAR(36); -ALTER TABLE users_collections ALTER COLUMN collection_uuid TYPE CHAR(36); -ALTER TABLE users_organizations ALTER COLUMN uuid TYPE CHAR(36); -ALTER TABLE users_organizations ALTER COLUMN user_uuid TYPE CHAR(36); -ALTER TABLE users_organizations ALTER COLUMN org_uuid TYPE CHAR(36); -ALTER TABLE folders_ciphers ALTER COLUMN cipher_uuid TYPE CHAR(36); -ALTER TABLE folders_ciphers ALTER COLUMN folder_uuid TYPE CHAR(36); -ALTER TABLE ciphers_collections ALTER COLUMN cipher_uuid TYPE CHAR(36); -ALTER TABLE ciphers_collections ALTER COLUMN collection_uuid TYPE CHAR(36); -ALTER TABLE twofactor ALTER COLUMN uuid TYPE CHAR(36); -ALTER TABLE twofactor ALTER COLUMN user_uuid TYPE CHAR(36); -ALTER TABLE invitations ALTER COLUMN email TYPE VARCHAR(255); -\ No newline at end of file diff --git a/migrations/postgresql/2019-09-16-150000_fix_attachments/up.sql b/migrations/postgresql/2019-09-16-150000_fix_attachments/up.sql @@ -1,27 +0,0 @@ --- Switch from CHAR() types to VARCHAR() types to avoid padding issues. -ALTER TABLE attachments ALTER COLUMN id TYPE TEXT; -ALTER TABLE attachments ALTER COLUMN cipher_uuid TYPE VARCHAR(40); -ALTER TABLE users ALTER COLUMN uuid TYPE VARCHAR(40); -ALTER TABLE users ALTER COLUMN email TYPE TEXT; -ALTER TABLE devices ALTER COLUMN uuid TYPE VARCHAR(40); -ALTER TABLE devices ALTER COLUMN user_uuid TYPE VARCHAR(40); -ALTER TABLE organizations ALTER COLUMN uuid TYPE VARCHAR(40); -ALTER TABLE ciphers ALTER COLUMN uuid TYPE VARCHAR(40); -ALTER TABLE ciphers ALTER COLUMN user_uuid TYPE VARCHAR(40); -ALTER TABLE ciphers ALTER COLUMN organization_uuid TYPE VARCHAR(40); -ALTER TABLE folders ALTER COLUMN uuid TYPE VARCHAR(40); -ALTER TABLE folders ALTER COLUMN user_uuid TYPE VARCHAR(40); -ALTER TABLE collections ALTER COLUMN uuid TYPE VARCHAR(40); -ALTER TABLE collections ALTER COLUMN org_uuid TYPE VARCHAR(40); -ALTER TABLE users_collections ALTER COLUMN user_uuid TYPE VARCHAR(40); -ALTER TABLE users_collections ALTER COLUMN collection_uuid TYPE VARCHAR(40); -ALTER TABLE users_organizations ALTER COLUMN uuid TYPE VARCHAR(40); -ALTER TABLE users_organizations ALTER COLUMN user_uuid TYPE VARCHAR(40); -ALTER TABLE users_organizations ALTER COLUMN org_uuid TYPE VARCHAR(40); -ALTER TABLE folders_ciphers ALTER COLUMN cipher_uuid TYPE VARCHAR(40); -ALTER TABLE folders_ciphers ALTER COLUMN folder_uuid TYPE VARCHAR(40); -ALTER TABLE ciphers_collections ALTER COLUMN cipher_uuid TYPE VARCHAR(40); -ALTER TABLE ciphers_collections ALTER COLUMN collection_uuid TYPE VARCHAR(40); -ALTER TABLE twofactor ALTER COLUMN uuid TYPE VARCHAR(40); -ALTER TABLE twofactor ALTER COLUMN user_uuid TYPE VARCHAR(40); -ALTER TABLE invitations ALTER COLUMN email TYPE TEXT; -\ No newline at end of file diff --git a/migrations/postgresql/2019-10-10-083032_add_column_to_twofactor/down.sql b/migrations/postgresql/2019-10-10-083032_add_column_to_twofactor/down.sql diff --git a/migrations/postgresql/2019-10-10-083032_add_column_to_twofactor/up.sql b/migrations/postgresql/2019-10-10-083032_add_column_to_twofactor/up.sql @@ -1 +0,0 @@ 
diff --git a/migrations/postgresql/2019-10-10-083032_add_column_to_twofactor/down.sql b/migrations/postgresql/2019-10-10-083032_add_column_to_twofactor/down.sql
diff --git a/migrations/postgresql/2019-10-10-083032_add_column_to_twofactor/up.sql b/migrations/postgresql/2019-10-10-083032_add_column_to_twofactor/up.sql
@@ -1 +0,0 @@
-ALTER TABLE twofactor ADD COLUMN last_used INTEGER NOT NULL DEFAULT 0;
\ No newline at end of file
diff --git a/migrations/postgresql/2019-11-17-011009_add_email_verification/down.sql b/migrations/postgresql/2019-11-17-011009_add_email_verification/down.sql
@@ -1 +0,0 @@
-
diff --git a/migrations/postgresql/2019-11-17-011009_add_email_verification/up.sql b/migrations/postgresql/2019-11-17-011009_add_email_verification/up.sql
@@ -1,5 +0,0 @@
-ALTER TABLE users ADD COLUMN verified_at TIMESTAMP DEFAULT NULL;
-ALTER TABLE users ADD COLUMN last_verifying_at TIMESTAMP DEFAULT NULL;
-ALTER TABLE users ADD COLUMN login_verify_count INTEGER NOT NULL DEFAULT 0;
-ALTER TABLE users ADD COLUMN email_new VARCHAR(255) DEFAULT NULL;
-ALTER TABLE users ADD COLUMN email_new_token VARCHAR(16) DEFAULT NULL;
diff --git a/migrations/postgresql/2020-03-13-205045_add_policy_table/down.sql b/migrations/postgresql/2020-03-13-205045_add_policy_table/down.sql
@@ -1 +0,0 @@
-DROP TABLE org_policies;
diff --git a/migrations/postgresql/2020-03-13-205045_add_policy_table/up.sql b/migrations/postgresql/2020-03-13-205045_add_policy_table/up.sql
@@ -1,9 +0,0 @@
-CREATE TABLE org_policies (
- uuid CHAR(36) NOT NULL PRIMARY KEY,
- org_uuid CHAR(36) NOT NULL REFERENCES organizations (uuid),
- atype INTEGER NOT NULL,
- enabled BOOLEAN NOT NULL,
- data TEXT NOT NULL,
-
- UNIQUE (org_uuid, atype)
-);
diff --git a/migrations/postgresql/2020-04-09-235005_add_cipher_delete_date/down.sql b/migrations/postgresql/2020-04-09-235005_add_cipher_delete_date/down.sql
@@ -1 +0,0 @@
-
diff --git a/migrations/postgresql/2020-04-09-235005_add_cipher_delete_date/up.sql b/migrations/postgresql/2020-04-09-235005_add_cipher_delete_date/up.sql
@@ -1,3 +0,0 @@
-ALTER TABLE ciphers
- ADD COLUMN
- deleted_at TIMESTAMP;
diff --git a/migrations/postgresql/2020-07-01-214531_add_hide_passwords/down.sql b/migrations/postgresql/2020-07-01-214531_add_hide_passwords/down.sql
diff --git a/migrations/postgresql/2020-07-01-214531_add_hide_passwords/up.sql b/migrations/postgresql/2020-07-01-214531_add_hide_passwords/up.sql
@@ -1,2 +0,0 @@
-ALTER TABLE users_collections
-ADD COLUMN hide_passwords BOOLEAN NOT NULL DEFAULT FALSE;
diff --git a/migrations/postgresql/2020-08-02-025025_add_favorites_table/down.sql b/migrations/postgresql/2020-08-02-025025_add_favorites_table/down.sql
@@ -1,13 +0,0 @@
-ALTER TABLE ciphers
-ADD COLUMN favorite BOOLEAN NOT NULL DEFAULT FALSE;
-
--- Transfer favorite status for user-owned ciphers.
-UPDATE ciphers
-SET favorite = TRUE
-WHERE EXISTS (
- SELECT * FROM favorites
- WHERE favorites.user_uuid = ciphers.user_uuid
- AND favorites.cipher_uuid = ciphers.uuid
-);
-
-DROP TABLE favorites;
diff --git a/migrations/postgresql/2020-08-02-025025_add_favorites_table/up.sql b/migrations/postgresql/2020-08-02-025025_add_favorites_table/up.sql
@@ -1,16 +0,0 @@
-CREATE TABLE favorites (
- user_uuid VARCHAR(40) NOT NULL REFERENCES users(uuid),
- cipher_uuid VARCHAR(40) NOT NULL REFERENCES ciphers(uuid),
-
- PRIMARY KEY (user_uuid, cipher_uuid)
-);
-
--- Transfer favorite status for user-owned ciphers.
-INSERT INTO favorites(user_uuid, cipher_uuid)
-SELECT user_uuid, uuid
-FROM ciphers
-WHERE favorite = TRUE
- AND user_uuid IS NOT NULL;
-
-ALTER TABLE ciphers
-DROP COLUMN favorite;
diff --git a/migrations/postgresql/2020-11-30-224000_add_user_enabled/down.sql b/migrations/postgresql/2020-11-30-224000_add_user_enabled/down.sql
diff --git a/migrations/postgresql/2020-11-30-224000_add_user_enabled/up.sql b/migrations/postgresql/2020-11-30-224000_add_user_enabled/up.sql
@@ -1 +0,0 @@
-ALTER TABLE users ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT true;
diff --git a/migrations/postgresql/2020-12-09-173101_add_stamp_exception/down.sql b/migrations/postgresql/2020-12-09-173101_add_stamp_exception/down.sql
diff --git a/migrations/postgresql/2020-12-09-173101_add_stamp_exception/up.sql b/migrations/postgresql/2020-12-09-173101_add_stamp_exception/up.sql
@@ -1 +0,0 @@
-ALTER TABLE users ADD COLUMN stamp_exception TEXT DEFAULT NULL;
\ No newline at end of file
diff --git a/migrations/postgresql/2021-03-11-190243_add_sends/down.sql b/migrations/postgresql/2021-03-11-190243_add_sends/down.sql
@@ -1 +0,0 @@
-DROP TABLE sends;
diff --git a/migrations/postgresql/2021-03-11-190243_add_sends/up.sql b/migrations/postgresql/2021-03-11-190243_add_sends/up.sql
@@ -1,25 +0,0 @@
-CREATE TABLE sends (
- uuid CHAR(36) NOT NULL PRIMARY KEY,
- user_uuid CHAR(36) REFERENCES users (uuid),
- organization_uuid CHAR(36) REFERENCES organizations (uuid),
-
- name TEXT NOT NULL,
- notes TEXT,
-
- atype INTEGER NOT NULL,
- data TEXT NOT NULL,
- key TEXT NOT NULL,
- password_hash BYTEA,
- password_salt BYTEA,
- password_iter INTEGER,
-
- max_access_count INTEGER,
- access_count INTEGER NOT NULL,
-
- creation_date TIMESTAMP NOT NULL,
- revision_date TIMESTAMP NOT NULL,
- expiration_date TIMESTAMP,
- deletion_date TIMESTAMP NOT NULL,
-
- disabled BOOLEAN NOT NULL
-);
\ No newline at end of file
diff --git a/migrations/postgresql/2021-03-15-163412_rename_send_key/down.sql b/migrations/postgresql/2021-03-15-163412_rename_send_key/down.sql
diff --git a/migrations/postgresql/2021-03-15-163412_rename_send_key/up.sql b/migrations/postgresql/2021-03-15-163412_rename_send_key/up.sql
@@ -1 +0,0 @@
-ALTER TABLE sends RENAME COLUMN key TO akey;
diff --git a/migrations/postgresql/2021-04-30-233251_add_reprompt/down.sql b/migrations/postgresql/2021-04-30-233251_add_reprompt/down.sql
diff --git a/migrations/postgresql/2021-04-30-233251_add_reprompt/up.sql b/migrations/postgresql/2021-04-30-233251_add_reprompt/up.sql
@@ -1,2 +0,0 @@
-ALTER TABLE ciphers
-ADD COLUMN reprompt INTEGER;
diff --git a/migrations/postgresql/2021-05-11-205202_add_hide_email/down.sql b/migrations/postgresql/2021-05-11-205202_add_hide_email/down.sql
diff --git a/migrations/postgresql/2021-05-11-205202_add_hide_email/up.sql b/migrations/postgresql/2021-05-11-205202_add_hide_email/up.sql
@@ -1,2 +0,0 @@
-ALTER TABLE sends
-ADD COLUMN hide_email BOOLEAN;
diff --git a/migrations/postgresql/2021-07-01-203140_add_password_reset_keys/down.sql b/migrations/postgresql/2021-07-01-203140_add_password_reset_keys/down.sql
diff --git a/migrations/postgresql/2021-07-01-203140_add_password_reset_keys/up.sql b/migrations/postgresql/2021-07-01-203140_add_password_reset_keys/up.sql
@@ -1,5 +0,0 @@
-ALTER TABLE organizations
- ADD COLUMN private_key TEXT;
-
-ALTER TABLE organizations
- ADD COLUMN public_key TEXT;
diff --git a/migrations/postgresql/2021-08-30-193501_create_emergency_access/down.sql b/migrations/postgresql/2021-08-30-193501_create_emergency_access/down.sql
@@ -1 +0,0 @@
-DROP TABLE emergency_access;
diff --git a/migrations/postgresql/2021-08-30-193501_create_emergency_access/up.sql b/migrations/postgresql/2021-08-30-193501_create_emergency_access/up.sql
@@ -1,14 +0,0 @@
-CREATE TABLE emergency_access (
- uuid CHAR(36) NOT NULL PRIMARY KEY,
- grantor_uuid CHAR(36) REFERENCES users (uuid),
- grantee_uuid CHAR(36) REFERENCES users (uuid),
- email VARCHAR(255),
- key_encrypted TEXT,
- atype INTEGER NOT NULL,
- status INTEGER NOT NULL,
- wait_time_days INTEGER NOT NULL,
- recovery_initiated_at TIMESTAMP,
- last_notification_at TIMESTAMP,
- updated_at TIMESTAMP NOT NULL,
- created_at TIMESTAMP NOT NULL
-);
diff --git a/migrations/postgresql/2021-10-24-164321_add_2fa_incomplete/down.sql b/migrations/postgresql/2021-10-24-164321_add_2fa_incomplete/down.sql
@@ -1 +0,0 @@
-DROP TABLE twofactor_incomplete;
diff --git a/migrations/postgresql/2021-10-24-164321_add_2fa_incomplete/up.sql b/migrations/postgresql/2021-10-24-164321_add_2fa_incomplete/up.sql
@@ -1,9 +0,0 @@
-CREATE TABLE twofactor_incomplete (
- user_uuid VARCHAR(40) NOT NULL REFERENCES users(uuid),
- device_uuid VARCHAR(40) NOT NULL,
- device_name TEXT NOT NULL,
- login_time TIMESTAMP NOT NULL,
- ip_address TEXT NOT NULL,
-
- PRIMARY KEY (user_uuid, device_uuid)
-);
diff --git a/migrations/postgresql/2022-01-17-234911_add_api_key/down.sql b/migrations/postgresql/2022-01-17-234911_add_api_key/down.sql
diff --git a/migrations/postgresql/2022-01-17-234911_add_api_key/up.sql b/migrations/postgresql/2022-01-17-234911_add_api_key/up.sql
@@ -1,2 +0,0 @@
-ALTER TABLE users
-ADD COLUMN api_key TEXT;
diff --git a/migrations/postgresql/2022-03-02-210038_update_devices_primary_key/down.sql b/migrations/postgresql/2022-03-02-210038_update_devices_primary_key/down.sql
diff --git a/migrations/postgresql/2022-03-02-210038_update_devices_primary_key/up.sql b/migrations/postgresql/2022-03-02-210038_update_devices_primary_key/up.sql
@@ -1,4 +0,0 @@
--- First remove the previous primary key
-ALTER TABLE devices DROP CONSTRAINT devices_pkey;
--- Add a new combined one
-ALTER TABLE devices ADD PRIMARY KEY (uuid, user_uuid);
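The two comments in that migration explain the intent: a device row is now identified by (uuid, user_uuid) rather than uuid alone. On the Diesel side, a composite primary key is declared by listing both columns in the table! macro header; a sketch of what such a declaration looks like (abbreviated column set, not this repository's actual schema.rs):

// Sketch only: Diesel models a composite primary key by naming both columns
// in the table! header. Column list abbreviated for illustration.
diesel::table! {
    devices (uuid, user_uuid) { // was: devices (uuid)
        uuid -> Text,
        user_uuid -> Text,
        name -> Text,
        atype -> Integer,
    }
}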
diff --git a/migrations/postgresql/2022-07-27-110000_add_group_support/down.sql b/migrations/postgresql/2022-07-27-110000_add_group_support/down.sql
@@ -1,3 +0,0 @@
-DROP TABLE groups;
-DROP TABLE groups_users;
-DROP TABLE collections_groups;
\ No newline at end of file
diff --git a/migrations/postgresql/2022-07-27-110000_add_group_support/up.sql b/migrations/postgresql/2022-07-27-110000_add_group_support/up.sql
@@ -1,23 +0,0 @@
-CREATE TABLE groups (
- uuid CHAR(36) NOT NULL PRIMARY KEY,
- organizations_uuid VARCHAR(40) NOT NULL REFERENCES organizations (uuid),
- name VARCHAR(100) NOT NULL,
- access_all BOOLEAN NOT NULL,
- external_id VARCHAR(300) NULL,
- creation_date TIMESTAMP NOT NULL,
- revision_date TIMESTAMP NOT NULL
-);
-
-CREATE TABLE groups_users (
- groups_uuid CHAR(36) NOT NULL REFERENCES groups (uuid),
- users_organizations_uuid VARCHAR(36) NOT NULL REFERENCES users_organizations (uuid),
- PRIMARY KEY (groups_uuid, users_organizations_uuid)
-);
-
-CREATE TABLE collections_groups (
- collections_uuid VARCHAR(40) NOT NULL REFERENCES collections (uuid),
- groups_uuid CHAR(36) NOT NULL REFERENCES groups (uuid),
- read_only BOOLEAN NOT NULL,
- hide_passwords BOOLEAN NOT NULL,
- PRIMARY KEY (collections_uuid, groups_uuid)
-);
\ No newline at end of file
diff --git a/migrations/postgresql/2022-10-18-170602_add_events/down.sql b/migrations/postgresql/2022-10-18-170602_add_events/down.sql
@@ -1 +0,0 @@
-DROP TABLE event;
diff --git a/migrations/postgresql/2022-10-18-170602_add_events/up.sql b/migrations/postgresql/2022-10-18-170602_add_events/up.sql
@@ -1,19 +0,0 @@
-CREATE TABLE event (
- uuid CHAR(36) NOT NULL PRIMARY KEY,
- event_type INTEGER NOT NULL,
- user_uuid CHAR(36),
- org_uuid CHAR(36),
- cipher_uuid CHAR(36),
- collection_uuid CHAR(36),
- group_uuid CHAR(36),
- org_user_uuid CHAR(36),
- act_user_uuid CHAR(36),
- device_type INTEGER,
- ip_address TEXT,
- event_date TIMESTAMP NOT NULL,
- policy_uuid CHAR(36),
- provider_uuid CHAR(36),
- provider_user_uuid CHAR(36),
- provider_org_uuid CHAR(36),
- UNIQUE (uuid)
-);
diff --git a/migrations/postgresql/2023-01-06-151600_add_reset_password_support/down.sql b/migrations/postgresql/2023-01-06-151600_add_reset_password_support/down.sql
diff --git a/migrations/postgresql/2023-01-06-151600_add_reset_password_support/up.sql b/migrations/postgresql/2023-01-06-151600_add_reset_password_support/up.sql
@@ -1,2 +0,0 @@
-ALTER TABLE users_organizations
-ADD COLUMN reset_password_key TEXT;
diff --git a/migrations/postgresql/2023-01-11-205851_add_avatar_color/down.sql b/migrations/postgresql/2023-01-11-205851_add_avatar_color/down.sql
diff --git a/migrations/postgresql/2023-01-11-205851_add_avatar_color/up.sql b/migrations/postgresql/2023-01-11-205851_add_avatar_color/up.sql
@@ -1,2 +0,0 @@
-ALTER TABLE users
-ADD COLUMN avatar_color TEXT;
diff --git a/migrations/postgresql/2023-01-31-222222_add_argon2/down.sql b/migrations/postgresql/2023-01-31-222222_add_argon2/down.sql
diff --git a/migrations/postgresql/2023-01-31-222222_add_argon2/up.sql b/migrations/postgresql/2023-01-31-222222_add_argon2/up.sql
@@ -1,7 +0,0 @@
-ALTER TABLE users
- ADD COLUMN
- client_kdf_memory INTEGER DEFAULT NULL;
-
-ALTER TABLE users
- ADD COLUMN
- client_kdf_parallelism INTEGER DEFAULT NULL;
diff --git a/migrations/postgresql/2023-02-18-125735_push_uuid_table/down.sql b/migrations/postgresql/2023-02-18-125735_push_uuid_table/down.sql
diff --git a/migrations/postgresql/2023-02-18-125735_push_uuid_table/up.sql b/migrations/postgresql/2023-02-18-125735_push_uuid_table/up.sql
@@ -1 +0,0 @@
-ALTER TABLE devices ADD COLUMN push_uuid TEXT;
\ No newline at end of file
diff --git a/migrations/postgresql/2023-06-02-200424_create_organization_api_key/down.sql b/migrations/postgresql/2023-06-02-200424_create_organization_api_key/down.sql
diff --git a/migrations/postgresql/2023-06-02-200424_create_organization_api_key/up.sql b/migrations/postgresql/2023-06-02-200424_create_organization_api_key/up.sql
@@ -1,10 +0,0 @@
-CREATE TABLE organization_api_key (
- uuid CHAR(36) NOT NULL,
- org_uuid CHAR(36) NOT NULL REFERENCES organizations(uuid),
- atype INTEGER NOT NULL,
- api_key VARCHAR(255),
- revision_date TIMESTAMP NOT NULL,
- PRIMARY KEY(uuid, org_uuid)
-);
-
-ALTER TABLE users ADD COLUMN external_id TEXT;
diff --git a/migrations/postgresql/2023-06-17-200424_create_auth_requests_table/down.sql b/migrations/postgresql/2023-06-17-200424_create_auth_requests_table/down.sql
diff --git a/migrations/postgresql/2023-06-17-200424_create_auth_requests_table/up.sql b/migrations/postgresql/2023-06-17-200424_create_auth_requests_table/up.sql
@@ -1,19 +0,0 @@
-CREATE TABLE auth_requests (
- uuid CHAR(36) NOT NULL PRIMARY KEY,
- user_uuid CHAR(36) NOT NULL,
- organization_uuid CHAR(36),
- request_device_identifier CHAR(36) NOT NULL,
- device_type INTEGER NOT NULL,
- request_ip TEXT NOT NULL,
- response_device_id CHAR(36),
- access_code TEXT NOT NULL,
- public_key TEXT NOT NULL,
- enc_key TEXT NOT NULL,
- master_password_hash TEXT NOT NULL,
- approved BOOLEAN,
- creation_date TIMESTAMP NOT NULL,
- response_date TIMESTAMP,
- authentication_date TIMESTAMP,
- FOREIGN KEY(user_uuid) REFERENCES users(uuid),
- FOREIGN KEY(organization_uuid) REFERENCES organizations(uuid)
-);
\ No newline at end of file
diff --git a/migrations/postgresql/2023-06-28-133700_add_collection_external_id/down.sql b/migrations/postgresql/2023-06-28-133700_add_collection_external_id/down.sql
diff --git a/migrations/postgresql/2023-06-28-133700_add_collection_external_id/up.sql b/migrations/postgresql/2023-06-28-133700_add_collection_external_id/up.sql
@@ -1 +0,0 @@
-ALTER TABLE collections ADD COLUMN external_id TEXT;
diff --git a/migrations/postgresql/2023-09-01-170620_update_auth_request_table/down.sql b/migrations/postgresql/2023-09-01-170620_update_auth_request_table/down.sql
diff --git a/migrations/postgresql/2023-09-01-170620_update_auth_request_table/up.sql b/migrations/postgresql/2023-09-01-170620_update_auth_request_table/up.sql
@@ -1,5 +0,0 @@
-ALTER TABLE auth_requests
-ALTER COLUMN master_password_hash DROP NOT NULL;
-
-ALTER TABLE auth_requests
-ALTER COLUMN enc_key DROP NOT NULL;
diff --git a/migrations/postgresql/2023-09-02-212336_move_user_external_id/down.sql b/migrations/postgresql/2023-09-02-212336_move_user_external_id/down.sql
diff --git a/migrations/postgresql/2023-09-02-212336_move_user_external_id/up.sql b/migrations/postgresql/2023-09-02-212336_move_user_external_id/up.sql
@@ -1,2 +0,0 @@
-ALTER TABLE users_organizations
-ADD COLUMN external_id TEXT;
diff --git a/migrations/postgresql/2023-10-21-221242_add_cipher_key/down.sql b/migrations/postgresql/2023-10-21-221242_add_cipher_key/down.sql
diff --git a/migrations/postgresql/2023-10-21-221242_add_cipher_key/up.sql b/migrations/postgresql/2023-10-21-221242_add_cipher_key/up.sql
@@ -1,2 +0,0 @@
-ALTER TABLE ciphers
-ADD COLUMN "key" TEXT;
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
@@ -1,4 +0,0 @@
-[toolchain]
-channel = "1.73.0"
-components = [ "rustfmt", "clippy" ]
-profile = "minimal"
diff --git a/rustfmt.toml b/rustfmt.toml
@@ -1,4 +0,0 @@
-edition = "2021"
-max_width = 120
-newline_style = "Unix"
-use_small_heuristics = "Off"
diff --git a/src/api/admin.rs b/src/api/admin.rs
@@ -1,806 +1,11 @@
-use once_cell::sync::Lazy;
-use serde::de::DeserializeOwned;
-use serde_json::Value;
-use std::env;
-
-use rocket::serde::json::Json;
-use rocket::{
- form::Form,
- http::{Cookie, CookieJar, MediaType, SameSite, Status},
- request::{FromRequest, Outcome, Request},
- response::{content::RawHtml as Html, Redirect},
- Catcher, Route,
-};
-
-use crate::{
- api::{core::log_event, unregister_push_device, ApiResult, EmptyResult, JsonResult, Notify, NumberOrString},
- auth::{decode_admin, encode_jwt, generate_admin_claims, ClientIp},
- config::ConfigBuilder,
- db::{backup_database, get_sql_server_version, models::*, DbConn, DbConnType},
- error::{Error, MapResult},
- mail,
- util::{
- docker_base_image, format_naive_datetime_local, get_display_size, get_reqwest_client, is_running_in_docker,
- },
- CONFIG, VERSION,
-};
-
+use rocket::{Catcher, Route};
pub fn routes() -> Vec<Route> {
- if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
- return routes![admin_disabled];
- }
-
- routes![
- get_users_json,
- get_user_json,
- get_user_by_mail_json,
- post_admin_login,
- admin_page,
- admin_page_login,
- invite_user,
- logout,
- delete_user,
- deauth_user,
- disable_user,
- enable_user,
- remove_2fa,
- update_user_org_type,
- update_revision_users,
- post_config,
- delete_config,
- backup_db,
- test_smtp,
- users_overview,
- organizations_overview,
- delete_organization,
- diagnostics,
- get_diagnostics_config,
- resend_user_invite,
- ]
+ routes![admin_disabled]
}
-
pub fn catchers() -> Vec<Catcher> {
- if !CONFIG.disable_admin_token() && !CONFIG.is_admin_token_set() {
- catchers![]
- } else {
- catchers![admin_login]
- }
+ catchers![]
}
-
-static DB_TYPE: Lazy<&str> = Lazy::new(|| {
- DbConnType::from_url(&CONFIG.database_url())
- .map(|t| match t {
- DbConnType::sqlite => "SQLite",
- DbConnType::mysql => "MySQL",
- DbConnType::postgresql => "PostgreSQL",
- })
- .unwrap_or("Unknown")
-});
-
-static CAN_BACKUP: Lazy<bool> =
- Lazy::new(|| DbConnType::from_url(&CONFIG.database_url()).map(|t| t == DbConnType::sqlite).unwrap_or(false));
-
#[get("/")]
fn admin_disabled() -> &'static str {
- "The admin panel is disabled, please configure the 'ADMIN_TOKEN' variable to enable it"
-}
-
-const COOKIE_NAME: &str = "VW_ADMIN";
-const ADMIN_PATH: &str = "/admin";
-const DT_FMT: &str = "%Y-%m-%d %H:%M:%S %Z";
-
-const BASE_TEMPLATE: &str = "admin/base";
-
-const ACTING_ADMIN_USER: &str = "vaultwarden-admin-00000-000000000000";
-
-fn admin_path() -> String {
- format!("{}{}", CONFIG.domain_path(), ADMIN_PATH)
-}
-
-#[derive(Debug)]
-struct IpHeader(Option<String>);
-
-#[rocket::async_trait]
-impl<'r> FromRequest<'r> for IpHeader {
- type Error = ();
-
- async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
- if req.headers().get_one(&CONFIG.ip_header()).is_some() {
- Outcome::Success(IpHeader(Some(CONFIG.ip_header())))
- } else if req.headers().get_one("X-Client-IP").is_some() {
- Outcome::Success(IpHeader(Some(String::from("X-Client-IP"))))
- } else if req.headers().get_one("X-Real-IP").is_some() {
- Outcome::Success(IpHeader(Some(String::from("X-Real-IP"))))
- } else if req.headers().get_one("X-Forwarded-For").is_some() {
- Outcome::Success(IpHeader(Some(String::from("X-Forwarded-For"))))
- } else {
- Outcome::Success(IpHeader(None))
- }
- }
-}
-
-fn admin_url() -> String {
- format!("{}{}", CONFIG.domain_origin(), admin_path())
-}
-
-#[derive(Responder)]
-enum AdminResponse {
- #[response(status = 200)]
- Ok(ApiResult<Html<String>>),
- #[response(status = 401)]
- Unauthorized(ApiResult<Html<String>>),
- #[response(status = 429)]
- TooManyRequests(ApiResult<Html<String>>),
-}
-
-#[catch(401)]
-fn admin_login(request: &Request<'_>) -> ApiResult<Html<String>> {
- if request.format() == Some(&MediaType::JSON) {
- err_code!("Authorization failed.", Status::Unauthorized.code);
- }
- let redirect = request.segments::<std::path::PathBuf>(0..).unwrap_or_default().display().to_string();
- render_admin_login(None, Some(redirect))
-}
-
-fn render_admin_login(msg: Option<&str>, redirect: Option<String>) -> ApiResult<Html<String>> {
- // If there is an error, show it
- let msg = msg.map(|msg| format!("Error: {msg}"));
- let json = json!({
- "page_content": "admin/login",
- "error": msg,
- "redirect": redirect,
- "urlpath": CONFIG.domain_path()
- });
-
- // Return the page
- let text = CONFIG.render_template(BASE_TEMPLATE, &json)?;
- Ok(Html(text))
-}
-
-#[derive(FromForm)]
-struct LoginForm {
- token: String,
- redirect: Option<String>,
-}
-
-#[post("/", data = "<data>")]
-fn post_admin_login(data: Form<LoginForm>, cookies: &CookieJar<'_>, ip: ClientIp) -> Result<Redirect, AdminResponse> {
- let data = data.into_inner();
- let redirect = data.redirect;
-
- if crate::ratelimit::check_limit_admin(&ip.ip).is_err() {
- return Err(AdminResponse::TooManyRequests(render_admin_login(
- Some("Too many requests, try again later."),
- redirect,
- )));
- }
-
- // If the token is invalid, redirect to login page
- if !_validate_token(&data.token) {
- error!("Invalid admin token. IP: {}", ip.ip);
- Err(AdminResponse::Unauthorized(render_admin_login(Some("Invalid admin token, please try again."), redirect)))
- } else {
- // If the token received is valid, generate JWT and save it as a cookie
- let claims = generate_admin_claims();
- let jwt = encode_jwt(&claims);
-
- let cookie = Cookie::build(COOKIE_NAME, jwt)
- .path(admin_path())
- .max_age(rocket::time::Duration::minutes(CONFIG.admin_session_lifetime()))
- .same_site(SameSite::Strict)
- .http_only(true)
- .finish();
-
- cookies.add(cookie);
- if let Some(redirect) = redirect {
- Ok(Redirect::to(format!("{}{}", admin_path(), redirect)))
- } else {
- Err(AdminResponse::Ok(render_admin_page()))
- }
- }
-}
-
-fn _validate_token(token: &str) -> bool {
- match CONFIG.admin_token().as_ref() {
- None => false,
- Some(t) if t.starts_with("$argon2") => {
- use argon2::password_hash::PasswordVerifier;
- match argon2::password_hash::PasswordHash::new(t) {
- Ok(h) => {
- // NOTE: hash params from `ADMIN_TOKEN` are used instead of what is configured in the `Argon2` instance.
- argon2::Argon2::default().verify_password(token.trim().as_ref(), &h).is_ok()
- }
- Err(e) => {
- error!("The configured Argon2 PHC in `ADMIN_TOKEN` is invalid: {e}");
- false
- }
- }
- }
- Some(t) => crate::crypto::ct_eq(t.trim(), token.trim()),
- }
-}
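The NOTE inside `_validate_token` above is worth pausing on: when `ADMIN_TOKEN` is a `$argon2...` PHC string, the parameters embedded in that string (memory, iterations, parallelism, salt) drive verification, not whatever the surrounding `Argon2` instance was configured with. A standalone sketch of that flow (hypothetical inputs; argon2 crate with its password-hash API):

use argon2::password_hash::{PasswordHash, PasswordVerifier};
use argon2::Argon2;

fn token_matches(phc: &str, presented: &str) -> bool {
    // PasswordHash::new parses the PHC string; its embedded parameters are
    // what verify_password uses, so Argon2::default() is only a dispatcher.
    match PasswordHash::new(phc) {
        Ok(parsed) => Argon2::default()
            .verify_password(presented.trim().as_bytes(), &parsed)
            .is_ok(),
        Err(_) => false, // malformed PHC string: fail closed
    }
}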
-
-#[derive(Serialize)]
-struct AdminTemplateData {
- page_content: String,
- page_data: Option<Value>,
- logged_in: bool,
- urlpath: String,
-}
-
-impl AdminTemplateData {
- fn new(page_content: &str, page_data: Value) -> Self {
- Self {
- page_content: String::from(page_content),
- page_data: Some(page_data),
- logged_in: true,
- urlpath: CONFIG.domain_path(),
- }
- }
-
- fn render(self) -> Result<String, Error> {
- CONFIG.render_template(BASE_TEMPLATE, &self)
- }
-}
-
-fn render_admin_page() -> ApiResult<Html<String>> {
- let settings_json = json!({
- "config": CONFIG.prepare_json(),
- "can_backup": *CAN_BACKUP,
- });
- let text = AdminTemplateData::new("admin/settings", settings_json).render()?;
- Ok(Html(text))
-}
-
-#[get("/")]
-fn admin_page(_token: AdminToken) -> ApiResult<Html<String>> {
- render_admin_page()
-}
-
-#[get("/", rank = 2)]
-fn admin_page_login() -> ApiResult<Html<String>> {
- render_admin_login(None, None)
-}
-
-#[derive(Deserialize, Debug)]
-#[allow(non_snake_case)]
-struct InviteData {
- email: String,
-}
-
-async fn get_user_or_404(uuid: &str, conn: &mut DbConn) -> ApiResult<User> {
- if let Some(user) = User::find_by_uuid(uuid, conn).await {
- Ok(user)
- } else {
- err_code!("User doesn't exist", Status::NotFound.code);
- }
-}
-
-#[post("/invite", data = "<data>")]
-async fn invite_user(data: Json<InviteData>, _token: AdminToken, mut conn: DbConn) -> JsonResult {
- let data: InviteData = data.into_inner();
- if User::find_by_mail(&data.email, &mut conn).await.is_some() {
- err_code!("User already exists", Status::Conflict.code)
- }
-
- let mut user = User::new(data.email);
-
- async fn _generate_invite(user: &User, conn: &mut DbConn) -> EmptyResult {
- if CONFIG.mail_enabled() {
- mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
- } else {
- let invitation = Invitation::new(&user.email);
- invitation.save(conn).await
- }
- }
-
- _generate_invite(&user, &mut conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;
- user.save(&mut conn).await.map_err(|e| e.with_code(Status::InternalServerError.code))?;
-
- Ok(Json(user.to_json(&mut conn).await))
-}
-
-#[post("/test/smtp", data = "<data>")]
-async fn test_smtp(data: Json<InviteData>, _token: AdminToken) -> EmptyResult {
- let data: InviteData = data.into_inner();
-
- if CONFIG.mail_enabled() {
- mail::send_test(&data.email).await
- } else {
- err!("Mail is not enabled")
- }
-}
-
-#[get("/logout")]
-fn logout(cookies: &CookieJar<'_>) -> Redirect {
- cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
- Redirect::to(admin_path())
-}
-
-#[get("/users")]
-async fn get_users_json(_token: AdminToken, mut conn: DbConn) -> Json<Value> {
- let users = User::get_all(&mut conn).await;
- let mut users_json = Vec::with_capacity(users.len());
- for u in users {
- let mut usr = u.to_json(&mut conn).await;
- usr["UserEnabled"] = json!(u.enabled);
- usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
- usr["LastActive"] = match u.last_active(&mut conn).await {
- Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
- None => json!(None::<String>),
- };
- users_json.push(usr);
- }
-
- Json(Value::Array(users_json))
-}
-
-#[get("/users/overview")]
-async fn users_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<String>> {
- let users = User::get_all(&mut conn).await;
- let mut users_json = Vec::with_capacity(users.len());
- for u in users {
- let mut usr = u.to_json(&mut conn).await;
- usr["cipher_count"] = json!(Cipher::count_owned_by_user(&u.uuid, &mut conn).await);
- usr["attachment_count"] = json!(Attachment::count_by_user(&u.uuid, &mut conn).await);
- usr["attachment_size"] = json!(get_display_size(Attachment::size_by_user(&u.uuid, &mut conn).await as i32));
- usr["user_enabled"] = json!(u.enabled);
- usr["created_at"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
- usr["last_active"] = match u.last_active(&mut conn).await {
- Some(dt) => json!(format_naive_datetime_local(&dt, DT_FMT)),
- None => json!("Never"),
- };
- users_json.push(usr);
- }
-
- let text = AdminTemplateData::new("admin/users", json!(users_json)).render()?;
- Ok(Html(text))
-}
-
-#[get("/users/by-mail/<mail>")]
-async fn get_user_by_mail_json(mail: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
- if let Some(u) = User::find_by_mail(mail, &mut conn).await {
- let mut usr = u.to_json(&mut conn).await;
- usr["UserEnabled"] = json!(u.enabled);
- usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
- Ok(Json(usr))
- } else {
- err_code!("User doesn't exist", Status::NotFound.code);
- }
-}
-
-#[get("/users/<uuid>")]
-async fn get_user_json(uuid: &str, _token: AdminToken, mut conn: DbConn) -> JsonResult {
- let u = get_user_or_404(uuid, &mut conn).await?;
- let mut usr = u.to_json(&mut conn).await;
- usr["UserEnabled"] = json!(u.enabled);
- usr["CreatedAt"] = json!(format_naive_datetime_local(&u.created_at, DT_FMT));
- Ok(Json(usr))
-}
-
-#[post("/users/<uuid>/delete")]
-async fn delete_user(uuid: &str, token: AdminToken, mut conn: DbConn) -> EmptyResult {
- let user = get_user_or_404(uuid, &mut conn).await?;
-
- // Get the user_org records before deleting the actual user
- let user_orgs = UserOrganization::find_any_state_by_user(uuid, &mut conn).await;
- let res = user.delete(&mut conn).await;
-
- for user_org in user_orgs {
- log_event(
- EventType::OrganizationUserRemoved as i32,
- &user_org.uuid,
- &user_org.org_uuid,
- String::from(ACTING_ADMIN_USER),
- 14, // Use UnknownBrowser type
- &token.ip.ip,
- &mut conn,
- )
- .await;
- }
-
- res
-}
-
-#[post("/users/<uuid>/deauth")]
-async fn deauth_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
- let mut user = get_user_or_404(uuid, &mut conn).await?;
-
- nt.send_logout(&user, None).await;
-
- if CONFIG.push_enabled() {
- for device in Device::find_push_devices_by_user(&user.uuid, &mut conn).await {
- match unregister_push_device(device.uuid).await {
- Ok(r) => r,
- Err(e) => error!("Unable to unregister devices from Bitwarden server: {}", e),
- };
- }
- }
-
- Device::delete_all_by_user(&user.uuid, &mut conn).await?;
- user.reset_security_stamp();
-
- user.save(&mut conn).await
-}
-
-#[post("/users/<uuid>/disable")]
-async fn disable_user(uuid: &str, _token: AdminToken, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
- let mut user = get_user_or_404(uuid, &mut conn).await?;
- Device::delete_all_by_user(&user.uuid, &mut conn).await?;
- user.reset_security_stamp();
- user.enabled = false;
-
- let save_result = user.save(&mut conn).await;
-
- nt.send_logout(&user, None).await;
-
- save_result
-}
-
-#[post("/users/<uuid>/enable")]
-async fn enable_user(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
- let mut user = get_user_or_404(uuid, &mut conn).await?;
- user.enabled = true;
-
- user.save(&mut conn).await
-}
-
-#[post("/users/<uuid>/remove-2fa")]
-async fn remove_2fa(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
- let mut user = get_user_or_404(uuid, &mut conn).await?;
- TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?;
- user.totp_recover = None;
- user.save(&mut conn).await
-}
-
-#[post("/users/<uuid>/invite/resend")]
-async fn resend_user_invite(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
- if let Some(user) = User::find_by_uuid(uuid, &mut conn).await {
- //TODO: replace this with user.status check when it will be available (PR#3397)
- if !user.password_hash.is_empty() {
- err_code!("User already accepted invitation", Status::BadRequest.code);
- }
-
- if CONFIG.mail_enabled() {
- mail::send_invite(&user.email, &user.uuid, None, None, &CONFIG.invitation_org_name(), None).await
- } else {
- Ok(())
- }
- } else {
- err_code!("User doesn't exist", Status::NotFound.code);
- }
-}
-
-#[derive(Deserialize, Debug)]
-struct UserOrgTypeData {
- user_type: NumberOrString,
- user_uuid: String,
- org_uuid: String,
-}
-
-#[post("/users/org_type", data = "<data>")]
-async fn update_user_org_type(data: Json<UserOrgTypeData>, token: AdminToken, mut conn: DbConn) -> EmptyResult {
- let data: UserOrgTypeData = data.into_inner();
-
- let mut user_to_edit =
- match UserOrganization::find_by_user_and_org(&data.user_uuid, &data.org_uuid, &mut conn).await {
- Some(user) => user,
- None => err!("The specified user isn't member of the organization"),
- };
-
- let new_type = match UserOrgType::from_str(&data.user_type.into_string()) {
- Some(new_type) => new_type as i32,
- None => err!("Invalid type"),
- };
-
- if user_to_edit.atype == UserOrgType::Owner && new_type != UserOrgType::Owner {
- // Removing owner permission, check that there is at least one other confirmed owner
- if UserOrganization::count_confirmed_by_org_and_type(&data.org_uuid, UserOrgType::Owner, &mut conn).await <= 1 {
- err!("Can't change the type of the last owner")
- }
- }
-
- // This check is also done at api::organizations::{accept_invite(), _confirm_invite, _activate_user(), edit_user()}, update_user_org_type
- // It returns different error messages per function.
- if new_type < UserOrgType::Admin {
- match OrgPolicy::is_user_allowed(&user_to_edit.user_uuid, &user_to_edit.org_uuid, true, &mut conn).await {
- Ok(_) => {}
- Err(OrgPolicyErr::TwoFactorMissing) => {
- err!("You cannot modify this user to this type because it has no two-step login method activated");
- }
- Err(OrgPolicyErr::SingleOrgEnforced) => {
- err!("You cannot modify this user to this type because it is a member of an organization which forbids it");
- }
- }
- }
-
- log_event(
- EventType::OrganizationUserUpdated as i32,
- &user_to_edit.uuid,
- &data.org_uuid,
- String::from(ACTING_ADMIN_USER),
- 14, // Use UnknownBrowser type
- &token.ip.ip,
- &mut conn,
- )
- .await;
-
- user_to_edit.atype = new_type;
- user_to_edit.save(&mut conn).await
-}
-
-#[post("/users/update_revision")]
-async fn update_revision_users(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
- User::update_all_revisions(&mut conn).await
-}
-
-#[get("/organizations/overview")]
-async fn organizations_overview(_token: AdminToken, mut conn: DbConn) -> ApiResult<Html<String>> {
- let organizations = Organization::get_all(&mut conn).await;
- let mut organizations_json = Vec::with_capacity(organizations.len());
- for o in organizations {
- let mut org = o.to_json();
- org["user_count"] = json!(UserOrganization::count_by_org(&o.uuid, &mut conn).await);
- org["cipher_count"] = json!(Cipher::count_by_org(&o.uuid, &mut conn).await);
- org["collection_count"] = json!(Collection::count_by_org(&o.uuid, &mut conn).await);
- org["group_count"] = json!(Group::count_by_org(&o.uuid, &mut conn).await);
- org["event_count"] = json!(Event::count_by_org(&o.uuid, &mut conn).await);
- org["attachment_count"] = json!(Attachment::count_by_org(&o.uuid, &mut conn).await);
- org["attachment_size"] = json!(get_display_size(Attachment::size_by_org(&o.uuid, &mut conn).await as i32));
- organizations_json.push(org);
- }
-
- let text = AdminTemplateData::new("admin/organizations", json!(organizations_json)).render()?;
- Ok(Html(text))
-}
-
-#[post("/organizations/<uuid>/delete")]
-async fn delete_organization(uuid: &str, _token: AdminToken, mut conn: DbConn) -> EmptyResult {
- let org = Organization::find_by_uuid(uuid, &mut conn).await.map_res("Organization doesn't exist")?;
- org.delete(&mut conn).await
-}
-
-#[derive(Deserialize)]
-struct WebVaultVersion {
- version: String,
-}
-
-#[derive(Deserialize)]
-struct GitRelease {
- tag_name: String,
-}
-
-#[derive(Deserialize)]
-struct GitCommit {
- sha: String,
-}
-
-#[derive(Deserialize)]
-struct TimeApi {
- year: u16,
- month: u8,
- day: u8,
- hour: u8,
- minute: u8,
- seconds: u8,
-}
-
-async fn get_json_api<T: DeserializeOwned>(url: &str) -> Result<T, Error> {
- let json_api = get_reqwest_client();
-
- Ok(json_api.get(url).send().await?.error_for_status()?.json::<T>().await?)
-}
-
-async fn has_http_access() -> bool {
- let http_access = get_reqwest_client();
-
- match http_access.head("https://github.com/dani-garcia/vaultwarden").send().await {
- Ok(r) => r.status().is_success(),
- _ => false,
- }
-}
-
-use cached::proc_macro::cached;
-/// Cache this function to prevent API call rate limit. Github only allows 60 requests per hour, and we use 3 here already.
-/// It will cache this function for 300 seconds (5 minutes) which should prevent the exhaustion of the rate limit.
-#[cached(time = 300, sync_writes = true)]
-async fn get_release_info(has_http_access: bool, running_within_docker: bool) -> (String, String, String) {
- // If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
- if has_http_access {
- (
- match get_json_api::<GitRelease>("https://api.github.com/repos/dani-garcia/vaultwarden/releases/latest")
- .await
- {
- Ok(r) => r.tag_name,
- _ => "-".to_string(),
- },
- match get_json_api::<GitCommit>("https://api.github.com/repos/dani-garcia/vaultwarden/commits/main").await {
- Ok(mut c) => {
- c.sha.truncate(8);
- c.sha
- }
- _ => "-".to_string(),
- },
- // Do not fetch the web-vault version when running within Docker.
- // The web-vault version is embedded within the container it self, and should not be updated manually
- if running_within_docker {
- "-".to_string()
- } else {
- match get_json_api::<GitRelease>(
- "https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
- )
- .await
- {
- Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
- _ => "-".to_string(),
- }
- },
- )
- } else {
- ("-".to_string(), "-".to_string(), "-".to_string())
- }
-}
-
-async fn get_ntp_time(has_http_access: bool) -> String {
- if has_http_access {
- if let Ok(ntp_time) = get_json_api::<TimeApi>("https://www.timeapi.io/api/Time/current/zone?timeZone=UTC").await
- {
- return format!(
- "{year}-{month:02}-{day:02} {hour:02}:{minute:02}:{seconds:02} UTC",
- year = ntp_time.year,
- month = ntp_time.month,
- day = ntp_time.day,
- hour = ntp_time.hour,
- minute = ntp_time.minute,
- seconds = ntp_time.seconds
- );
- }
- }
- String::from("Unable to fetch NTP time.")
-}
-
-#[get("/diagnostics")]
-async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn) -> ApiResult<Html<String>> {
- use chrono::prelude::*;
- use std::net::ToSocketAddrs;
-
- // Get current running versions
- let web_vault_version: WebVaultVersion =
- match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "vw-version.json")) {
- Ok(s) => serde_json::from_str(&s)?,
- _ => match std::fs::read_to_string(format!("{}/{}", CONFIG.web_vault_folder(), "version.json")) {
- Ok(s) => serde_json::from_str(&s)?,
- _ => WebVaultVersion {
- version: String::from("Version file missing"),
- },
- },
- };
-
- // Execute some environment checks
- let running_within_docker = is_running_in_docker();
- let has_http_access = has_http_access().await;
- let uses_proxy = env::var_os("HTTP_PROXY").is_some()
- || env::var_os("http_proxy").is_some()
- || env::var_os("HTTPS_PROXY").is_some()
- || env::var_os("https_proxy").is_some();
-
- // Check if we are able to resolve DNS entries
- let dns_resolved = match ("github.com", 0).to_socket_addrs().map(|mut i| i.next()) {
- Ok(Some(a)) => a.ip().to_string(),
- _ => "Unable to resolve domain name.".to_string(),
- };
-
- let (latest_release, latest_commit, latest_web_build) =
- get_release_info(has_http_access, running_within_docker).await;
-
- let ip_header_name = match &ip_header.0 {
- Some(h) => h,
- _ => "",
- };
-
- let diagnostics_json = json!({
- "dns_resolved": dns_resolved,
- "current_release": VERSION,
- "latest_release": latest_release,
- "latest_commit": latest_commit,
- "web_vault_enabled": &CONFIG.web_vault_enabled(),
- "web_vault_version": web_vault_version.version.trim_start_matches('v'),
- "latest_web_build": latest_web_build,
- "running_within_docker": running_within_docker,
- "docker_base_image": if running_within_docker { docker_base_image() } else { "Not applicable" },
- "has_http_access": has_http_access,
- "ip_header_exists": &ip_header.0.is_some(),
- "ip_header_match": ip_header_name == CONFIG.ip_header(),
- "ip_header_name": ip_header_name,
- "ip_header_config": &CONFIG.ip_header(),
- "uses_proxy": uses_proxy,
- "db_type": *DB_TYPE,
- "db_version": get_sql_server_version(&mut conn).await,
- "admin_url": format!("{}/diagnostics", admin_url()),
- "overrides": &CONFIG.get_overrides().join(", "),
- "host_arch": std::env::consts::ARCH,
- "host_os": std::env::consts::OS,
- "server_time_local": Local::now().format("%Y-%m-%d %H:%M:%S %Z").to_string(),
- "server_time": Utc::now().format("%Y-%m-%d %H:%M:%S UTC").to_string(), // Run the server date/time check as late as possible to minimize the time difference
- "ntp_time": get_ntp_time(has_http_access).await, // Run the ntp check as late as possible to minimize the time difference
- });
-
- let text = AdminTemplateData::new("admin/diagnostics", diagnostics_json).render()?;
- Ok(Html(text))
-}
-
-#[get("/diagnostics/config")]
-fn get_diagnostics_config(_token: AdminToken) -> Json<Value> {
- let support_json = CONFIG.get_support_json();
- Json(support_json)
-}
-
-#[post("/config", data = "<data>")]
-fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
- let data: ConfigBuilder = data.into_inner();
- CONFIG.update_config(data)
-}
-
-#[post("/config/delete")]
-fn delete_config(_token: AdminToken) -> EmptyResult {
- CONFIG.delete_user_config()
-}
-
-#[post("/config/backup_db")]
-async fn backup_db(_token: AdminToken, mut conn: DbConn) -> EmptyResult {
- if *CAN_BACKUP {
- backup_database(&mut conn).await
- } else {
- err!("Can't back up current DB (Only SQLite supports this feature)");
- }
-}
-
-pub struct AdminToken {
- ip: ClientIp,
-}
-
-#[rocket::async_trait]
-impl<'r> FromRequest<'r> for AdminToken {
- type Error = &'static str;
-
- async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> {
- let ip = match ClientIp::from_request(request).await {
- Outcome::Success(ip) => ip,
- _ => err_handler!("Error getting Client IP"),
- };
-
- if CONFIG.disable_admin_token() {
- Outcome::Success(Self {
- ip,
- })
- } else {
- let cookies = request.cookies();
-
- let access_token = match cookies.get(COOKIE_NAME) {
- Some(cookie) => cookie.value(),
- None => {
- let requested_page =
- request.segments::<std::path::PathBuf>(0..).unwrap_or_default().display().to_string();
- // When the requested page is empty, it is `/admin`, in that case, Forward, so it will render the login page
- // Else, return a 401 failure, which will be caught
- if requested_page.is_empty() {
- return Outcome::Forward(Status::Unauthorized);
- } else {
- return Outcome::Failure((Status::Unauthorized, "Unauthorized"));
- }
- }
- };
-
- if decode_admin(access_token).is_err() {
- // Remove admin cookie
- cookies.remove(Cookie::build(COOKIE_NAME, "").path(admin_path()).finish());
- error!("Invalid or expired admin JWT. IP: {}.", &ip.ip);
- return Outcome::Failure((Status::Unauthorized, "Session expired"));
- }
-
- Outcome::Success(Self {
- ip,
- })
- }
- }
+ "The admin panel is not allowed to be enabled."
}
diff --git a/src/api/core/accounts.rs b/src/api/core/accounts.rs
@@ -1,12 +1,11 @@
-use crate::db::DbPool;
use chrono::Utc;
use rocket::serde::json::Json;
use serde_json::Value;

use crate::{
 api::{
- core::log_user_event, register_push_device, unregister_push_device, AnonymousNotify, EmptyResult, JsonResult,
- JsonUpcase, Notify, NumberOrString, PasswordData, UpdateType,
+ AnonymousNotify, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, PasswordData,
+ UpdateType,
 },
 auth::{decode_delete, decode_invite, decode_verify_email, ClientHeaders, Headers},
 crypto,
@@ -144,13 +143,18 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
 err!("Registration email does not match invite email")
 }
 } else if Invitation::take(&email, &mut conn).await {
- for user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn).await.iter_mut() {
+ for user_org in UserOrganization::find_invited_by_user(&user.uuid, &mut conn)
+ .await
+ .iter_mut()
+ {
 user_org.status = UserOrgStatus::Accepted as i32;
 user_org.save(&mut conn).await?;
 }
 user
 } else if CONFIG.is_signup_allowed(&email)
- || EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn).await.is_some()
+ || EmergencyAccess::find_invited_by_grantee_email(&email, &mut conn)
+ .await
+ .is_some()
 {
 user
 } else {
@@ -198,13 +202,10 @@ pub async fn _register(data: JsonUpcase<RegisterData>, mut conn: DbConn) -> Json
 if CONFIG.mail_enabled() {
 if CONFIG.signups_verify() && !verified_by_invite {
- if let Err(e) = mail::send_welcome_must_verify(&user.email, &user.uuid).await {
- error!("Error sending welcome email: {:#?}", e);
- }
-
+ mail::send_welcome_must_verify(&user.email, &user.uuid).await?;
 user.last_verifying_at = Some(user.created_at);
- } else if let Err(e) = mail::send_welcome(&user.email).await {
- error!("Error sending welcome email: {:#?}", e);
+ } else {
+ mail::send_welcome(&user.email).await?;
 }
 }
@@ -234,7 +235,11 @@ async fn put_profile(data: JsonUpcase<ProfileData>, headers: Headers, conn: DbCo
 }

 #[post("/accounts/profile", data = "<data>")]
-async fn post_profile(data: JsonUpcase<ProfileData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn post_profile(
+ data: JsonUpcase<ProfileData>,
+ headers: Headers,
+ mut conn: DbConn,
+) -> JsonResult {
 let data: ProfileData = data.into_inner().data;

 // Check if the length of the username exceeds 50 characters (Same is Upstream Bitwarden)
@@ -257,7 +262,11 @@ struct AvatarData {
 }

 #[put("/accounts/avatar", data = "<data>")]
-async fn put_avatar(data: JsonUpcase<AvatarData>, headers: Headers, mut conn: DbConn) -> JsonResult {
+async fn put_avatar(
+ data: JsonUpcase<AvatarData>,
+ headers: Headers,
+ mut conn: DbConn,
+) -> JsonResult {
 let data: AvatarData = data.into_inner().data;

 // It looks like it only supports the 6 hex color format.
@@ -265,7 +274,9 @@ async fn put_avatar(data: JsonUpcase<AvatarData>, headers: Headers, mut conn: Db
 // Check and force 7 chars, including the #.
 if let Some(color) = &data.AvatarColor {
 if color.len() != 7 {
- err!("The field AvatarColor must be a HTML/Hex color code with a length of 7 characters")
+ err!(
+ "The field AvatarColor must be a HTML/Hex color code with a length of 7 characters"
+ )
 }
 }

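Upstream's check above only enforces the length; the comment states the full invariant (seven characters including the leading `#`, hex digits after it). A stricter standalone validator would be small (sketch, not code from this commit):

// Sketch: checks the full invariant the comment describes, not just length.
fn is_avatar_color(s: &str) -> bool {
    s.len() == 7 && s.starts_with('#') && s[1..].chars().all(|c| c.is_ascii_hexdigit())
}

fn main() {
    assert!(is_avatar_color("#a1b2c3"));
    assert!(!is_avatar_color("a1b2c3f")); // missing '#'
    assert!(!is_avatar_color("#a1b2c")); // too short
}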
@@ -330,18 +341,17 @@ async fn post_password(
 if !user.check_valid_password(&data.MasterPasswordHash) {
 err!("Invalid password")
 }
-
 user.password_hint = clean_password_hint(&data.MasterPasswordHint);
 enforce_password_hint_setting(&user.password_hint)?;
-
- log_user_event(EventType::UserChangedPassword as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn)
- .await;
-
 user.set_password(
 &data.NewMasterPasswordHash,
 Some(data.Key),
 true,
- Some(vec![String::from("post_rotatekey"), String::from("get_contacts"), String::from("get_public_keys")]),
+ Some(vec![
+ String::from("post_rotatekey"),
+ String::from("get_contacts"),
+ String::from("get_public_keys"),
+ ]),
 );

 let save_result = user.save(&mut conn).await;
@@ -368,7 +378,12 @@ struct ChangeKdfData {
 }

 #[post("/accounts/kdf", data = "<data>")]
-async fn post_kdf(data: JsonUpcase<ChangeKdfData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+async fn post_kdf(
+ data: JsonUpcase<ChangeKdfData>,
+ headers: Headers,
+ mut conn: DbConn,
+ nt: Notify<'_>,
+) -> EmptyResult {
 let data: ChangeKdfData = data.into_inner().data;
 let mut user = headers.user;

@@ -434,7 +449,12 @@ struct KeyData {
 }

 #[post("/accounts/key", data = "<data>")]
-async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult {
+async fn post_rotatekey(
+ data: JsonUpcase<KeyData>,
+ headers: Headers,
+ mut conn: DbConn,
+ nt: Notify<'_>,
+) -> EmptyResult {
 let data: KeyData = data.into_inner().data;

 if !headers.user.check_valid_password(&data.MasterPasswordHash) {
@@ -468,10 +488,11 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D
 use super::ciphers::update_cipher_from_data;

 for cipher_data in data.Ciphers {
- let mut saved_cipher = match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &mut conn).await {
- Some(cipher) => cipher,
- None => err!("Cipher doesn't exist"),
- };
+ let mut saved_cipher =
+ match Cipher::find_by_uuid(cipher_data.Id.as_ref().unwrap(), &mut conn).await {
+ Some(cipher) => cipher,
+ None => err!("Cipher doesn't exist"),
+ };

 if saved_cipher.user_uuid.as_ref().unwrap() != user_uuid {
 err!("The cipher is not owned by the user")
@@ -480,8 +501,16 @@ async fn post_rotatekey(data: JsonUpcase<KeyData>, headers: Headers, mut conn: D
 // Prevent triggering cipher updates via WebSockets by settings UpdateType::None
 // The user sessions are invalidated because all the ciphers were re-encrypted and thus triggering an update could cause issues.
 // We force the users to logout after the user has been saved to try and prevent these issues.
- update_cipher_from_data(&mut saved_cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None)
- .await?
+ update_cipher_from_data(
+ &mut saved_cipher,
+ cipher_data,
+ &headers,
+ false,
+ &mut conn,
+ &nt,
+ UpdateType::None,
+ )
+ .await?
 }

 // Update user data
@@ -532,7 +561,11 @@ struct EmailTokenData {
 }

 #[post("/accounts/email-token", data = "<data>")]
-async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn post_email_token(
+ data: JsonUpcase<EmailTokenData>,
+ headers: Headers,
+ mut conn: DbConn,
+) -> EmptyResult {
 if !CONFIG.email_change_allowed() {
 err!("Email change is not allowed.");
 }
@@ -544,7 +577,10 @@ async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, mu
 err!("Invalid password")
 }

- if User::find_by_mail(&data.NewEmail, &mut conn).await.is_some() {
+ if User::find_by_mail(&data.NewEmail, &mut conn)
+ .await
+ .is_some()
+ {
 err!("Email already in use");
 }

@@ -555,9 +591,7 @@ async fn post_email_token(data: JsonUpcase<EmailTokenData>, headers: Headers, mu
 let token = crypto::generate_email_token(6);

 if CONFIG.mail_enabled() {
- if let Err(e) = mail::send_change_email(&data.NewEmail, &token).await {
- error!("Error sending change-email email: {:#?}", e);
- }
+ mail::send_change_email(&data.NewEmail, &token).await?
 }

 user.email_new = Some(data.NewEmail);
@@ -594,7 +628,10 @@ async fn post_email(
 err!("Invalid password")
 }

- if User::find_by_mail(&data.NewEmail, &mut conn).await.is_some() {
+ if User::find_by_mail(&data.NewEmail, &mut conn)
+ .await
+ .is_some()
+ {
 err!("Email already in use");
 }

@@ -642,12 +679,7 @@ async fn post_verify_email(headers: Headers) -> EmptyResult {
 if !CONFIG.mail_enabled() {
 err!("Cannot verify email address");
 }
-
- if let Err(e) = mail::send_verify_email(&user.email, &user.uuid).await {
- error!("Error sending verify_email email: {:#?}", e);
- }
-
- Ok(())
+ mail::send_verify_email(&user.email, &user.uuid).await
 }

 #[derive(Deserialize)]
@@ -658,7 +690,10 @@ struct VerifyEmailTokenData {
 }

 #[post("/accounts/verify-email-token", data = "<data>")]
-async fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, mut conn: DbConn) -> EmptyResult {
+async fn post_verify_email_token(
+ data: JsonUpcase<VerifyEmailTokenData>,
+ mut conn: DbConn,
+) -> EmptyResult {
 let data: VerifyEmailTokenData = data.into_inner().data;

 let mut user = match User::find_by_uuid(&data.UserId, &mut conn).await {
@@ -676,11 +711,7 @@ async fn post_verify_email_token(data: JsonUpcase<VerifyEmailTokenData>, mut con
 user.verified_at = Some(Utc::now().naive_utc());
 user.last_verifying_at = None;
 user.login_verify_count = 0;
- if let Err(e) = user.save(&mut conn).await {
- error!("Error saving email verification: {:#?}", e);
- }
-
- Ok(())
+ user.save(&mut conn).await
 }

 #[derive(Deserialize)]
@@ -695,11 +726,10 @@ async fn post_delete_recover(data: JsonUpcase<DeleteRecoverData>, mut conn: DbCo

 if CONFIG.mail_enabled() {
 if let Some(user) = User::find_by_mail(&data.Email, &mut conn).await {
- if let Err(e) = mail::send_delete_account(&user.email, &user.uuid).await {
- error!("Error sending delete account email: {:#?}", e);
- }
+ mail::send_delete_account(&user.email, &user.uuid).await
+ } else {
+ Ok(())
 }
- Ok(())
 } else {
 // We don't support sending emails, but we shouldn't allow anybody
 // to delete accounts without at least logging in... And if the user
@@ -717,7 +747,10 @@ struct DeleteRecoverTokenData {
 }

 #[post("/accounts/delete-recover-token", data = "<data>")]
-async fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, mut conn: DbConn) -> EmptyResult {
+async fn post_delete_recover_token(
+ data: JsonUpcase<DeleteRecoverTokenData>,
+ mut conn: DbConn,
+) -> EmptyResult {
 let data: DeleteRecoverTokenData = data.into_inner().data;

 let user = match User::find_by_uuid(&data.UserId, &mut conn).await {
@@ -736,12 +769,20 @@ async fn post_delete_recover_token(data: JsonUpcase<DeleteRecoverTokenData>, mut
 }

 #[post("/accounts/delete", data = "<data>")]
-async fn post_delete_account(data: JsonUpcase<PasswordData>, headers: Headers, conn: DbConn) -> EmptyResult {
+async fn post_delete_account(
+ data: JsonUpcase<PasswordData>,
+ headers: Headers,
+ conn: DbConn,
+) -> EmptyResult {
 delete_account(data, headers, conn).await
 }

 #[delete("/accounts", data = "<data>")]
-async fn delete_account(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> EmptyResult {
+async fn delete_account(
+ data: JsonUpcase<PasswordData>,
+ headers: Headers,
+ mut conn: DbConn,
+) -> EmptyResult {
 let data: PasswordData = data.into_inner().data;
 let user = headers.user;

@@ -820,10 +861,21 @@ async fn prelogin(data: JsonUpcase<PreloginData>, conn: DbConn) -> Json<Value> {
 pub async fn _prelogin(data: JsonUpcase<PreloginData>, mut conn: DbConn) -> Json<Value> {
 let data: PreloginData = data.into_inner().data;

- let (kdf_type, kdf_iter, kdf_mem, kdf_para) = match User::find_by_mail(&data.Email, &mut conn).await {
- Some(user) => (user.client_kdf_type, user.client_kdf_iter, user.client_kdf_memory, user.client_kdf_parallelism),
- None => (User::CLIENT_KDF_TYPE_DEFAULT, User::CLIENT_KDF_ITER_DEFAULT, None, None),
- };
+ let (kdf_type, kdf_iter, kdf_mem, kdf_para) =
+ match User::find_by_mail(&data.Email, &mut conn).await {
+ Some(user) => (
+ user.client_kdf_type,
+ user.client_kdf_iter,
+ user.client_kdf_memory,
+ user.client_kdf_parallelism,
+ ),
+ None => (
+ User::CLIENT_KDF_TYPE_DEFAULT,
+ User::CLIENT_KDF_ITER_DEFAULT,
+ None,
+ None,
+ ),
+ };

 let result = json!({
 "Kdf": kdf_type,
@@ -882,12 +934,20 @@ async fn _api_key(
 }

 #[post("/accounts/api-key", data = "<data>")]
-async fn api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn api_key(
+ data: JsonUpcase<SecretVerificationRequest>,
+ headers: Headers,
+ conn: DbConn,
+) -> JsonResult {
 _api_key(data, false, headers, conn).await
 }

 #[post("/accounts/rotate-api-key", data = "<data>")]
-async fn rotate_api_key(data: JsonUpcase<SecretVerificationRequest>, headers: Headers, conn: DbConn) -> JsonResult {
+async fn rotate_api_key(
+ data: JsonUpcase<SecretVerificationRequest>,
+ headers: Headers,
+ conn: DbConn,
+) -> JsonResult {
 _api_key(data, true, headers, conn).await
 }

@@ -897,7 +957,9 @@ async fn get_known_device_from_path(email: &str, uuid: &str, mut conn: DbConn) -
 // This endpoint doesn't have auth header
 let mut result = false;
 if let Some(user) = User::find_by_mail(email, &mut conn).await {
- result = Device::find_by_uuid_and_user(uuid, &user.uuid, &mut conn).await.is_some();
+ result = Device::find_by_uuid_and_user(uuid, &user.uuid, &mut conn)
+ .await
+ .is_some();
 }
 Ok(Json(json!(result)))
 }
@@ -930,7 +992,10 @@ impl<'r> FromRequest<'r> for KnownDevice {
 match String::from_utf8(email_bytes) {
 Ok(email) => email,
 Err(_) => {
- return Outcome::Failure((Status::BadRequest, "X-Request-Email value failed to decode as UTF-8"));
+ return Outcome::Failure((
+ Status::BadRequest,
+ "X-Request-Email value failed to decode as UTF-8",
+ ));
 }
 }
 } else {
@@ -943,72 +1008,43 @@ impl<'r> FromRequest<'r> for KnownDevice {
 return Outcome::Failure((Status::BadRequest, "X-Device-Identifier value is required"));
 };

- Outcome::Success(KnownDevice {
- email,
- uuid,
- })
+ Outcome::Success(KnownDevice { email, uuid })
 }
 }

 #[derive(Deserialize)]
 #[allow(non_snake_case)]
-struct PushToken {
- PushToken: String,
-}
+struct PushToken;

 #[post("/devices/identifier/<uuid>/token", data = "<data>")]
-async fn post_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Headers, conn: DbConn) -> EmptyResult {
- put_device_token(uuid, data, headers, conn).await
+fn post_device_token(
+ uuid: &str,
+ data: JsonUpcase<PushToken>,
+ headers: Headers,
+ conn: DbConn,
+) -> EmptyResult {
+ put_device_token(uuid, data, headers, conn)
 }
-
+#[allow(unused_variables)]
 #[put("/devices/identifier/<uuid>/token", data = "<data>")]
-async fn put_device_token(uuid: &str, data: JsonUpcase<PushToken>, headers: Headers, mut conn: DbConn) -> EmptyResult {
- if !CONFIG.push_enabled() {
- return Ok(());
- }
-
- let data = data.into_inner().data;
- let token = data.PushToken;
- let mut device = match Device::find_by_uuid_and_user(&headers.device.uuid, &headers.user.uuid, &mut conn).await {
- Some(device) => device,
- None => err!(format!("Error: device {uuid} should be present before a token can be assigned")),
- };
- device.push_token = Some(token);
- if device.push_uuid.is_none() {
- device.push_uuid = Some(uuid::Uuid::new_v4().to_string());
- }
- if let Err(e) = device.save(&mut conn).await {
- err!(format!("An error occurred while trying to save the device push token: {e}"));
- }
- if let Err(e) = register_push_device(headers.user.uuid, device).await {
- err!(format!("An error occurred while proceeding registration of a device: {e}"));
- }
-
+fn put_device_token(
+ uuid: &str,
+ data: JsonUpcase<PushToken>,
+ _headers: Headers,
+ _conn: DbConn,
+) -> EmptyResult {
 Ok(())
 }
-
+#[allow(unused_variables)]
 #[put("/devices/identifier/<uuid>/clear-token")]
-async fn put_clear_device_token(uuid: &str, mut conn: DbConn) -> EmptyResult {
- // This only clears push token
- // https://github.com/bitwarden/core/blob/master/src/Api/Controllers/DevicesController.cs#L109
- // https://github.com/bitwarden/core/blob/master/src/Core/Services/Implementations/DeviceService.cs#L37
- // This is somehow not implemented in any app, added it in case it is required
- if !CONFIG.push_enabled() {
- return Ok(());
- }
-
- if let Some(device) = Device::find_by_uuid(uuid, &mut conn).await {
- Device::clear_push_token_by_uuid(uuid, &mut conn).await?;
- unregister_push_device(device.uuid).await?;
- }
-
+fn put_clear_device_token(uuid: &str, _conn: DbConn) -> EmptyResult {
 Ok(())
 }

 // On upstream server, both PUT and POST are declared. Implementing the POST method in case it would be useful somewhere
 #[post("/devices/identifier/<uuid>/clear-token")]
-async fn post_clear_device_token(uuid: &str, conn: DbConn) -> EmptyResult {
- put_clear_device_token(uuid, conn).await
+fn post_clear_device_token(uuid: &str, conn: DbConn) -> EmptyResult {
+ put_clear_device_token(uuid, conn)
 }

 #[derive(Debug, Deserialize)]
@@ -1048,7 +1084,13 @@ async fn post_auth_request(
 );
 auth_request.save(&mut conn).await?;

- nt.send_auth_request(&user.uuid, &auth_request.uuid, &data.deviceIdentifier, &mut conn).await;
+ nt.send_auth_request(
+ &user.uuid,
+ &auth_request.uuid,
+ &data.deviceIdentifier,
+ &mut conn,
+ )
+ .await;

 Ok(Json(json!({
 "id": auth_request.uuid,
@@ -1074,7 +1116,9 @@ async fn get_auth_request(uuid: &str, mut conn: DbConn) -> JsonResult {
 }
 };

- let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
+ let response_date_utc = auth_request
+ .response_date
+ .map(|response_date| response_date.and_utc());

 Ok(Json(json!(
 {
@@ -1125,11 +1169,20 @@ async fn put_auth_request(
 auth_request.save(&mut conn).await?;

 if auth_request.approved.unwrap_or(false) {
- ant.send_auth_response(&auth_request.user_uuid, &auth_request.uuid).await;
- nt.send_auth_response(&auth_request.user_uuid, &auth_request.uuid, data.deviceIdentifier, &mut conn).await;
+ ant.send_auth_response(&auth_request.user_uuid, &auth_request.uuid)
+ .await;
+ nt.send_auth_response(
+ &auth_request.user_uuid,
+ &auth_request.uuid,
+ data.deviceIdentifier,
+ &mut conn,
+ )
+ .await;
 }

- let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
+ let response_date_utc = auth_request
+ .response_date
+ .map(|response_date| response_date.and_utc());

 Ok(Json(json!(
 {
@@ -1161,7 +1214,9 @@ async fn get_auth_request_response(uuid: &str, code: &str, mut conn: DbConn) ->
 err!("Access code invalid doesn't exist")
 }

- let response_date_utc = auth_request.response_date.map(|response_date| response_date.and_utc());
+ let response_date_utc = auth_request
+ .response_date
+ .map(|response_date| response_date.and_utc());

 Ok(Json(json!(
 {
@@ -1209,12 +1264,3 @@ async fn get_auth_requests(headers: Headers, mut conn: DbConn) -> JsonResult {
 "object": "list"
 })))
 }
-
-pub async fn purge_auth_requests(pool: DbPool) {
- debug!("Purging auth requests");
- if let Ok(mut conn) = pool.get().await {
- AuthRequest::purge_expired_auth_requests(&mut conn).await;
- } else {
- error!("Failed to get DB connection while purging trashed ciphers")
- }
-}
diff --git a/src/api/core/ciphers.rs b/src/api/core/ciphers.rs
@@ -1,5 +1,11 @@
-use std::collections::{HashMap, HashSet};
-
+use super::folders::FolderData;
+use crate::{
+ api::{self, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
+ auth::Headers,
+ crypto,
+ db::{models::*, DbConn},
+ CONFIG,
+};
 use chrono::{NaiveDateTime, Utc};
 use rocket::fs::TempFile;
 use rocket::serde::json::Json;
@@ -8,17 +14,7 @@ use rocket::{
 Route,
 };
 use serde_json::Value;
-
-use crate::{
- api::{self, core::log_event, EmptyResult, JsonResult, JsonUpcase, Notify, PasswordData, UpdateType},
- auth::Headers,
- crypto,
- db::{models::*, DbConn, DbPool},
- CONFIG,
-};
-
-use super::folders::FolderData;
-
+use std::collections::{HashMap, HashSet};
 pub fn routes() -> Vec<Route> {
 // Note that many routes have an `admin` variant; this seems to be
 // because the stored procedure that upstream Bitwarden uses to determine
@@ -84,15 +80,6 @@ pub fn routes() -> Vec<Route> {
 ]
 }

-pub async
fn purge_trashed_ciphers(pool: DbPool) { - debug!("Purging trashed ciphers"); - if let Ok(mut conn) = pool.get().await { - Cipher::purge_trash(&mut conn).await; - } else { - error!("Failed to get DB connection while purging trashed ciphers") - } -} - #[derive(FromForm, Default)] struct SyncData { #[field(name = "excludeDomains")] @@ -106,31 +93,51 @@ async fn sync(data: SyncData, headers: Headers, mut conn: DbConn) -> Json<Value> // Get all ciphers which are visible by the user let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &mut conn).await; - let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await; + let cipher_sync_data = + CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await; // Lets generate the ciphers_json using all the gathered info let mut ciphers_json = Vec::with_capacity(ciphers.len()); for c in ciphers { ciphers_json.push( - c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn) - .await, + c.to_json( + &headers.host, + &headers.user.uuid, + Some(&cipher_sync_data), + CipherSyncType::User, + &mut conn, + ) + .await, ); } let collections = Collection::find_by_user_uuid(headers.user.uuid.clone(), &mut conn).await; let mut collections_json = Vec::with_capacity(collections.len()); for c in collections { - collections_json.push(c.to_json_details(&headers.user.uuid, Some(&cipher_sync_data), &mut conn).await); + collections_json.push( + c.to_json_details(&headers.user.uuid, Some(&cipher_sync_data), &mut conn) + .await, + ); } - let folders_json: Vec<Value> = - Folder::find_by_user(&headers.user.uuid, &mut conn).await.iter().map(Folder::to_json).collect(); + let folders_json: Vec<Value> = Folder::find_by_user(&headers.user.uuid, &mut conn) + .await + .iter() + .map(Folder::to_json) + .collect(); - let sends_json: Vec<Value> = - Send::find_by_user(&headers.user.uuid, &mut conn).await.iter().map(Send::to_json).collect(); + let sends_json: Vec<Value> = Send::find_by_user(&headers.user.uuid, &mut conn) + .await + .iter() + .map(Send::to_json) + .collect(); let policies_json: Vec<Value> = - OrgPolicy::find_confirmed_by_user(&headers.user.uuid, &mut conn).await.iter().map(OrgPolicy::to_json).collect(); + OrgPolicy::find_confirmed_by_user(&headers.user.uuid, &mut conn) + .await + .iter() + .map(OrgPolicy::to_json) + .collect(); let domains_json = if data.exclude_domains { Value::Null @@ -154,13 +161,20 @@ async fn sync(data: SyncData, headers: Headers, mut conn: DbConn) -> Json<Value> #[get("/ciphers")] async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json<Value> { let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &mut conn).await; - let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await; + let cipher_sync_data = + CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await; let mut ciphers_json = Vec::with_capacity(ciphers.len()); for c in ciphers { ciphers_json.push( - c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn) - .await, + c.to_json( + &headers.host, + &headers.user.uuid, + Some(&cipher_sync_data), + CipherSyncType::User, + &mut conn, + ) + .await, ); } @@ -178,11 +192,24 @@ async fn get_cipher(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResul None => err!("Cipher doesn't exist"), }; - if !cipher.is_accessible_to_user(&headers.user.uuid, &mut conn).await { + if !cipher + 
.is_accessible_to_user(&headers.user.uuid, &mut conn) + .await + { err!("Cipher is not owned by user") } - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await)) + Ok(Json( + cipher + .to_json( + &headers.host, + &headers.user.uuid, + None, + CipherSyncType::User, + &mut conn, + ) + .await, + )) } #[get("/ciphers/<uuid>/admin")] @@ -310,7 +337,12 @@ async fn post_ciphers_create( /// Called when creating a new user-owned cipher. #[post("/ciphers", data = "<data>")] -async fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { +async fn post_ciphers( + data: JsonUpcase<CipherData>, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { let mut data: CipherData = data.into_inner().data; // The web/browser clients set this field to null as expected, but the @@ -320,9 +352,28 @@ async fn post_ciphers(data: JsonUpcase<CipherData>, headers: Headers, mut conn: data.LastKnownRevisionDate = None; let mut cipher = Cipher::new(data.Type, data.Name.clone()); - update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherCreate).await?; - - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await)) + update_cipher_from_data( + &mut cipher, + data, + &headers, + false, + &mut conn, + &nt, + UpdateType::SyncCipherCreate, + ) + .await?; + + Ok(Json( + cipher + .to_json( + &headers.host, + &headers.user.uuid, + None, + CipherSyncType::User, + &mut conn, + ) + .await, + )) } /// Enforces the personal ownership policy on user-owned ciphers, if applicable. @@ -382,17 +433,15 @@ pub async fn update_cipher_from_data( err!("The field Notes exceeds the maximum encrypted value length of 10000 characters.") } } - - // Check if this cipher is being transferred from a personal to an organization vault - let transfer_cipher = cipher.organization_uuid.is_none() && data.OrganizationId.is_some(); - if let Some(org_id) = data.OrganizationId { match UserOrganization::find_by_user_and_org(&headers.user.uuid, &org_id, conn).await { None => err!("You don't have permission to add item to organization"), Some(org_user) => { if shared_to_collection || org_user.has_full_access() - || cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await + || cipher + .is_write_accessible_to_user(&headers.user.uuid, conn) + .await { cipher.organization_uuid = Some(org_id); // After some discussion in PR #1329 re-added the user_uuid = None again. @@ -457,9 +506,13 @@ pub async fn update_cipher_from_data( // But, we at least know we do not need to store and return this specific key. 
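// As a hypothetical illustration of the cleanup below: an input of
// [{"Type": 0, "Response": "ignored"}] is returned as [{"Type": 0}];
// each object in the array simply loses its "Response" key.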
fn _clean_cipher_data(mut json_data: Value) -> Value { if json_data.is_array() { - json_data.as_array_mut().unwrap().iter_mut().for_each(|ref mut f| { - f.as_object_mut().unwrap().remove("Response"); - }); + json_data + .as_array_mut() + .unwrap() + .iter_mut() + .for_each(|ref mut f| { + f.as_object_mut().unwrap().remove("Response"); + }); }; json_data } @@ -494,31 +547,23 @@ pub async fn update_cipher_from_data( cipher.reprompt = data.Reprompt; cipher.save(conn).await?; - cipher.move_to_folder(data.FolderId, &headers.user.uuid, conn).await?; - cipher.set_favorite(data.Favorite, &headers.user.uuid, conn).await?; + cipher + .move_to_folder(data.FolderId, &headers.user.uuid, conn) + .await?; + cipher + .set_favorite(data.Favorite, &headers.user.uuid, conn) + .await?; if ut != UpdateType::None { - // Only log events for organizational ciphers - if let Some(org_uuid) = &cipher.organization_uuid { - let event_type = match (&ut, transfer_cipher) { - (UpdateType::SyncCipherCreate, true) => EventType::CipherCreated, - (UpdateType::SyncCipherUpdate, true) => EventType::CipherShared, - (_, _) => EventType::CipherUpdated, - }; - - log_event( - event_type as i32, - &cipher.uuid, - org_uuid, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - conn, - ) - .await; - } - nt.send_cipher_update(ut, cipher, &cipher.update_users_revision(conn).await, &headers.device.uuid, None, conn) - .await; + nt.send_cipher_update( + ut, + cipher, + &cipher.update_users_revision(conn).await, + &headers.device.uuid, + None, + conn, + ) + .await; } Ok(()) } @@ -579,7 +624,16 @@ async fn post_ciphers_import( cipher_data.FolderId = folder_uuid; let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone()); - update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await?; + update_cipher_from_data( + &mut cipher, + cipher_data, + &headers, + false, + &mut conn, + &nt, + UpdateType::None, + ) + .await?; } let mut user = headers.user; @@ -643,13 +697,35 @@ async fn put_cipher( // cipher itself, so the user shouldn't need write access to change these. // Interestingly, upstream Bitwarden doesn't properly handle this either. 
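// The fetch-then-authorize guard just below recurs all through this file.
// A minimal sketch of that shape, assuming a hypothetical `Record` type in
// place of `Cipher` and plain `Err` strings in place of the `err!` macro:
struct Record;
fn authorize_write(found: Option<Record>, can_write: bool) -> Result<Record, &'static str> {
    // A missing record and missing access are reported separately, matching
    // the two error messages used by the surrounding handlers.
    let record = match found {
        Some(r) => r,
        None => return Err("Cipher doesn't exist"),
    };
    if !can_write {
        return Err("Cipher is not write accessible");
    }
    Ok(record)
}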
- if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { + if !cipher + .is_write_accessible_to_user(&headers.user.uuid, &mut conn) + .await + { err!("Cipher is not write accessible") } - update_cipher_from_data(&mut cipher, data, &headers, false, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?; - - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await)) + update_cipher_from_data( + &mut cipher, + data, + &headers, + false, + &mut conn, + &nt, + UpdateType::SyncCipherUpdate, + ) + .await?; + + Ok(Json( + cipher + .to_json( + &headers.host, + &headers.user.uuid, + None, + CipherSyncType::User, + &mut conn, + ) + .await, + )) } #[post("/ciphers/<uuid>/partial", data = "<data>")] @@ -689,11 +765,25 @@ async fn put_cipher_partial( } // Move cipher - cipher.move_to_folder(data.FolderId.clone(), &headers.user.uuid, &mut conn).await?; + cipher + .move_to_folder(data.FolderId.clone(), &headers.user.uuid, &mut conn) + .await?; // Update favorite - cipher.set_favorite(Some(data.Favorite), &headers.user.uuid, &mut conn).await?; - - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await)) + cipher + .set_favorite(Some(data.Favorite), &headers.user.uuid, &mut conn) + .await?; + + Ok(Json( + cipher + .to_json( + &headers.host, + &headers.user.uuid, + None, + CipherSyncType::User, + &mut conn, + ) + .await, + )) } #[derive(Deserialize)] @@ -750,19 +840,29 @@ async fn post_collections_admin( None => err!("Cipher doesn't exist"), }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { + if !cipher + .is_write_accessible_to_user(&headers.user.uuid, &mut conn) + .await + { err!("Cipher is not write accessible") } let posted_collections: HashSet<String> = data.CollectionIds.iter().cloned().collect(); - let current_collections: HashSet<String> = - cipher.get_collections(headers.user.uuid.clone(), &mut conn).await.iter().cloned().collect(); + let current_collections: HashSet<String> = cipher + .get_collections(headers.user.uuid.clone(), &mut conn) + .await + .iter() + .cloned() + .collect(); for collection in posted_collections.symmetric_difference(&current_collections) { match Collection::find_by_uuid(collection, &mut conn).await { None => err!("Invalid collection ID provided"), Some(collection) => { - if collection.is_writable_by_user(&headers.user.uuid, &mut conn).await { + if collection + .is_writable_by_user(&headers.user.uuid, &mut conn) + .await + { if posted_collections.contains(&collection.uuid) { // Add to collection CollectionCipher::save(&cipher.uuid, &collection.uuid, &mut conn).await?; @@ -786,18 +886,6 @@ async fn post_collections_admin( &mut conn, ) .await; - - log_event( - EventType::CipherUpdatedCollections as i32, - &cipher.uuid, - &cipher.organization_uuid.unwrap(), - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - Ok(()) } @@ -873,7 +961,9 @@ async fn put_cipher_share_selected( }; match shared_cipher_data.Cipher.Id.take() { - Some(id) => share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &nt).await?, + Some(id) => { + share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &nt).await? 
+ } None => err!("Request missing ids field"), }; } @@ -890,7 +980,10 @@ async fn share_cipher_by_uuid( ) -> JsonResult { let mut cipher = match Cipher::find_by_uuid(uuid, conn).await { Some(cipher) => { - if cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await { + if cipher + .is_write_accessible_to_user(&headers.user.uuid, conn) + .await + { cipher } else { err!("Cipher is not write accessible") @@ -906,7 +999,10 @@ async fn share_cipher_by_uuid( match Collection::find_by_uuid_and_org(uuid, organization_uuid, conn).await { None => err!("Invalid collection ID provided"), Some(collection) => { - if collection.is_writable_by_user(&headers.user.uuid, conn).await { + if collection + .is_writable_by_user(&headers.user.uuid, conn) + .await + { CollectionCipher::save(&cipher.uuid, &collection.uuid, conn).await?; shared_to_collection = true; } else { @@ -924,9 +1020,28 @@ async fn share_cipher_by_uuid( UpdateType::SyncCipherCreate }; - update_cipher_from_data(&mut cipher, data.Cipher, headers, shared_to_collection, conn, nt, ut).await?; - - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await)) + update_cipher_from_data( + &mut cipher, + data.Cipher, + headers, + shared_to_collection, + conn, + nt, + ut, + ) + .await?; + + Ok(Json( + cipher + .to_json( + &headers.host, + &headers.user.uuid, + None, + CipherSyncType::User, + conn, + ) + .await, + )) } /// v2 API for downloading an attachment. This just redirects the client to @@ -936,18 +1051,28 @@ async fn share_cipher_by_uuid( /// their object storage service. For self-hosted instances, it basically just /// redirects to the same location as before the v2 API. #[get("/ciphers/<uuid>/attachment/<attachment_id>")] -async fn get_attachment(uuid: &str, attachment_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn get_attachment( + uuid: &str, + attachment_id: &str, + headers: Headers, + mut conn: DbConn, +) -> JsonResult { let cipher = match Cipher::find_by_uuid(uuid, &mut conn).await { Some(cipher) => cipher, None => err!("Cipher doesn't exist"), }; - if !cipher.is_accessible_to_user(&headers.user.uuid, &mut conn).await { + if !cipher + .is_accessible_to_user(&headers.user.uuid, &mut conn) + .await + { err!("Cipher is not accessible") } match Attachment::find_by_id(attachment_id, &mut conn).await { - Some(attachment) if uuid == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host))), + Some(attachment) if uuid == attachment.cipher_uuid => { + Ok(Json(attachment.to_json(&headers.host))) + } Some(_) => err!("Attachment doesn't belong to cipher"), None => err!("Attachment doesn't exist"), } @@ -983,15 +1108,26 @@ async fn post_attachment_v2( None => err!("Cipher doesn't exist"), }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { + if !cipher + .is_write_accessible_to_user(&headers.user.uuid, &mut conn) + .await + { err!("Cipher is not write accessible") } let attachment_id = crypto::generate_attachment_id(); let data: AttachmentRequestData = data.into_inner().data; - let attachment = - Attachment::new(attachment_id.clone(), cipher.uuid.clone(), data.FileName, data.FileSize, Some(data.Key)); - attachment.save(&mut conn).await.expect("Error saving attachment"); + let attachment = Attachment::new( + attachment_id.clone(), + cipher.uuid.clone(), + data.FileName, + data.FileSize, + Some(data.Key), + ); + attachment + .save(&mut conn) + .await + .expect("Error saving attachment"); let url = format!("/ciphers/{}/attachment/{}", 
cipher.uuid, attachment_id); let response_key = match data.AdminRequest { @@ -1035,7 +1171,10 @@ async fn save_attachment( None => err!("Cipher doesn't exist"), }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, &mut conn).await { + if !cipher + .is_write_accessible_to_user(&headers.user.uuid, &mut conn) + .await + { err!("Cipher is not write accessible") } @@ -1050,7 +1189,8 @@ async fn save_attachment( match CONFIG.user_attachment_limit() { Some(0) => err!("Attachments are disabled"), Some(limit_kb) => { - let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &mut conn).await + size_adjust; + let left = (limit_kb * 1024) - Attachment::size_by_user(user_uuid, &mut conn).await + + size_adjust; if left <= 0 { err!("Attachment storage limit reached! Delete some attachments to free up space") } @@ -1062,7 +1202,8 @@ async fn save_attachment( match CONFIG.org_attachment_limit() { Some(0) => err!("Attachments are disabled"), Some(limit_kb) => { - let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &mut conn).await + size_adjust; + let left = (limit_kb * 1024) - Attachment::size_by_org(org_uuid, &mut conn).await + + size_adjust; if left <= 0 { err!("Attachment storage limit reached! Delete some attachments to free up space") } @@ -1087,7 +1228,9 @@ async fn save_attachment( None => crypto::generate_attachment_id(), // Legacy API }; - let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_uuid); + let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()) + .await? + .join(cipher_uuid); let file_path = folder_path.join(&file_id); tokio::fs::create_dir_all(&folder_path).await?; @@ -1106,16 +1249,24 @@ async fn save_attachment( if size != attachment.file_size { // Update the attachment with the actual file size. 
attachment.file_size = size; - attachment.save(&mut conn).await.expect("Error updating attachment"); + attachment + .save(&mut conn) + .await + .expect("Error updating attachment"); } } else { attachment.delete(&mut conn).await.ok(); - err!(format!("Attachment size mismatch (expected within [{min_size}, {max_size}], got {size})")); + err!(format!( + "Attachment size mismatch (expected within [{min_size}, {max_size}], got {size})" + )); } } else { // Legacy API - let encrypted_filename = data.data.raw_name().map(|s| s.dangerous_unsafe_unsanitized_raw().to_string()); + let encrypted_filename = data + .data + .raw_name() + .map(|s| s.dangerous_unsafe_unsanitized_raw().to_string()); if encrypted_filename.is_none() { err!("No filename provided") @@ -1123,9 +1274,17 @@ async fn save_attachment( if data.key.is_none() { err!("No attachment key provided") } - let attachment = - Attachment::new(file_id, String::from(cipher_uuid), encrypted_filename.unwrap(), size, data.key); - attachment.save(&mut conn).await.expect("Error saving attachment"); + let attachment = Attachment::new( + file_id, + String::from(cipher_uuid), + encrypted_filename.unwrap(), + size, + data.key, + ); + attachment + .save(&mut conn) + .await + .expect("Error saving attachment"); } if let Err(_err) = data.data.persist_to(&file_path).await { @@ -1141,20 +1300,6 @@ async fn save_attachment( &mut conn, ) .await; - - if let Some(org_uuid) = &cipher.organization_uuid { - log_event( - EventType::CipherAttachmentCreated as i32, - &cipher.uuid, - org_uuid, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - } - Ok((cipher, conn)) } @@ -1162,7 +1307,12 @@ async fn save_attachment( /// This route needs a rank specified so that Rocket prioritizes the /// /ciphers/<uuid>/attachment/v2 route, which would otherwise conflict /// with this one. -#[post("/ciphers/<uuid>/attachment/<attachment_id>", format = "multipart/form-data", data = "<data>", rank = 1)] +#[post( + "/ciphers/<uuid>/attachment/<attachment_id>", + format = "multipart/form-data", + data = "<data>", + rank = 1 +)] async fn post_attachment_v2_data( uuid: &str, attachment_id: &str, @@ -1183,7 +1333,11 @@ async fn post_attachment_v2_data( } /// Legacy API for creating an attachment associated with a cipher. 
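/// A request sketch with hypothetical values; the `data` and `key` multipart
/// parts correspond to the `UploadData` fields consumed by `save_attachment`
/// above:
///
///     POST /ciphers/<uuid>/attachment
///     Content-Type: multipart/form-data
///     data = <encrypted file bytes>, key = <encrypted attachment key>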
-#[post("/ciphers/<uuid>/attachment", format = "multipart/form-data", data = "<data>")] +#[post( + "/ciphers/<uuid>/attachment", + format = "multipart/form-data", + data = "<data>" +)] async fn post_attachment( uuid: &str, data: Form<UploadData<'_>>, @@ -1197,10 +1351,24 @@ async fn post_attachment( let (cipher, mut conn) = save_attachment(attachment, uuid, data, &headers, conn, nt).await?; - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await)) + Ok(Json( + cipher + .to_json( + &headers.host, + &headers.user.uuid, + None, + CipherSyncType::User, + &mut conn, + ) + .await, + )) } -#[post("/ciphers/<uuid>/attachment-admin", format = "multipart/form-data", data = "<data>")] +#[post( + "/ciphers/<uuid>/attachment-admin", + format = "multipart/form-data", + data = "<data>" +)] async fn post_attachment_admin( uuid: &str, data: Form<UploadData<'_>>, @@ -1211,7 +1379,11 @@ async fn post_attachment_admin( post_attachment(uuid, data, headers, conn, nt).await } -#[post("/ciphers/<uuid>/attachment/<attachment_id>/share", format = "multipart/form-data", data = "<data>")] +#[post( + "/ciphers/<uuid>/attachment/<attachment_id>/share", + format = "multipart/form-data", + data = "<data>" +)] async fn post_attachment_share( uuid: &str, attachment_id: &str, @@ -1269,36 +1441,66 @@ async fn delete_attachment_admin( } #[post("/ciphers/<uuid>/delete")] -async fn delete_cipher_post(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { +async fn delete_cipher_post( + uuid: &str, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { _delete_cipher_by_uuid(uuid, &headers, &mut conn, false, &nt).await // permanent delete } #[post("/ciphers/<uuid>/delete-admin")] -async fn delete_cipher_post_admin(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { +async fn delete_cipher_post_admin( + uuid: &str, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { _delete_cipher_by_uuid(uuid, &headers, &mut conn, false, &nt).await // permanent delete } #[put("/ciphers/<uuid>/delete")] -async fn delete_cipher_put(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { +async fn delete_cipher_put( + uuid: &str, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { _delete_cipher_by_uuid(uuid, &headers, &mut conn, true, &nt).await // soft delete } #[put("/ciphers/<uuid>/delete-admin")] -async fn delete_cipher_put_admin(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { +async fn delete_cipher_put_admin( + uuid: &str, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { _delete_cipher_by_uuid(uuid, &headers, &mut conn, true, &nt).await } #[delete("/ciphers/<uuid>")] -async fn delete_cipher(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { +async fn delete_cipher( + uuid: &str, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { _delete_cipher_by_uuid(uuid, &headers, &mut conn, false, &nt).await // permanent delete } #[delete("/ciphers/<uuid>/admin")] -async fn delete_cipher_admin(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> EmptyResult { +async fn delete_cipher_admin( + uuid: &str, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> EmptyResult { _delete_cipher_by_uuid(uuid, &headers, &mut conn, false, &nt).await // permanent delete } @@ -1364,12 +1566,22 @@ async fn 
delete_cipher_selected_put_admin( } #[put("/ciphers/<uuid>/restore")] -async fn restore_cipher_put(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { +async fn restore_cipher_put( + uuid: &str, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { _restore_cipher_by_uuid(uuid, &headers, &mut conn, &nt).await } #[put("/ciphers/<uuid>/restore-admin")] -async fn restore_cipher_put_admin(uuid: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { +async fn restore_cipher_put_admin( + uuid: &str, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { _restore_cipher_by_uuid(uuid, &headers, &mut conn, &nt).await } @@ -1422,7 +1634,9 @@ async fn move_cipher_selected( } // Move cipher - cipher.move_to_folder(data.FolderId.clone(), &user_uuid, &mut conn).await?; + cipher + .move_to_folder(data.FolderId.clone(), &user_uuid, &mut conn) + .await?; nt.send_cipher_update( UpdateType::SyncCipherUpdate, @@ -1474,24 +1688,14 @@ async fn delete_all( match organization { Some(org_data) => { // Organization ID in query params, purging organization vault - match UserOrganization::find_by_user_and_org(&user.uuid, &org_data.org_id, &mut conn).await { + match UserOrganization::find_by_user_and_org(&user.uuid, &org_data.org_id, &mut conn) + .await + { None => err!("You don't have permission to purge the organization vault"), Some(user_org) => { if user_org.atype == UserOrgType::Owner { Cipher::delete_all_by_organization(&org_data.org_id, &mut conn).await?; nt.send_user_update(UpdateType::SyncVault, &user).await; - - log_event( - EventType::OrganizationPurgedVault as i32, - &org_data.org_id, - &org_data.org_id, - user.uuid, - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - Ok(()) } else { err!("You don't have permission to purge the organization vault"); @@ -1531,7 +1735,10 @@ async fn _delete_cipher_by_uuid( None => err!("Cipher doesn't exist"), }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await { + if !cipher + .is_write_accessible_to_user(&headers.user.uuid, conn) + .await + { err!("Cipher can't be deleted by user") } @@ -1559,25 +1766,6 @@ async fn _delete_cipher_by_uuid( ) .await; } - - if let Some(org_uuid) = cipher.organization_uuid { - let event_type = match soft_delete { - true => EventType::CipherSoftDeleted as i32, - false => EventType::CipherDeleted as i32, - }; - - log_event( - event_type, - &cipher.uuid, - &org_uuid, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - conn, - ) - .await; - } - Ok(()) } @@ -1599,7 +1787,9 @@ async fn _delete_multiple_ciphers( }; for uuid in uuids { - if let error @ Err(_) = _delete_cipher_by_uuid(uuid, &headers, &mut conn, soft_delete, &nt).await { + if let error @ Err(_) = + _delete_cipher_by_uuid(uuid, &headers, &mut conn, soft_delete, &nt).await + { return error; }; } @@ -1607,13 +1797,21 @@ async fn _delete_multiple_ciphers( Ok(()) } -async fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &mut DbConn, nt: &Notify<'_>) -> JsonResult { +async fn _restore_cipher_by_uuid( + uuid: &str, + headers: &Headers, + conn: &mut DbConn, + nt: &Notify<'_>, +) -> JsonResult { let mut cipher = match Cipher::find_by_uuid(uuid, conn).await { Some(cipher) => cipher, None => err!("Cipher doesn't exist"), }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await { + if !cipher + .is_write_accessible_to_user(&headers.user.uuid, conn) + .await + { err!("Cipher can't be restored by user") } @@ 
-1629,21 +1827,17 @@ async fn _restore_cipher_by_uuid(uuid: &str, headers: &Headers, conn: &mut DbCon conn, ) .await; - - if let Some(org_uuid) = &cipher.organization_uuid { - log_event( - EventType::CipherRestored as i32, - &cipher.uuid.clone(), - org_uuid, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - conn, - ) - .await; - } - - Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await)) + Ok(Json( + cipher + .to_json( + &headers.host, + &headers.user.uuid, + None, + CipherSyncType::User, + conn, + ) + .await, + )) } async fn _restore_multiple_ciphers( @@ -1698,7 +1892,10 @@ async fn _delete_cipher_attachment_by_id( None => err!("Cipher doesn't exist"), }; - if !cipher.is_write_accessible_to_user(&headers.user.uuid, conn).await { + if !cipher + .is_write_accessible_to_user(&headers.user.uuid, conn) + .await + { err!("Cipher cannot be deleted by user") } @@ -1713,19 +1910,6 @@ async fn _delete_cipher_attachment_by_id( conn, ) .await; - - if let Some(org_uuid) = cipher.organization_uuid { - log_event( - EventType::CipherAttachmentDeleted as i32, - &cipher.uuid, - &org_uuid, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - conn, - ) - .await; - } Ok(()) } @@ -1758,10 +1942,16 @@ impl CipherSyncData { // User Sync supports Folders and Favorites CipherSyncType::User => { // Generate a HashMap with the Cipher UUID as key and the Folder UUID as value - cipher_folders = FolderCipher::find_by_user(user_uuid, conn).await.into_iter().collect(); + cipher_folders = FolderCipher::find_by_user(user_uuid, conn) + .await + .into_iter() + .collect(); // Generate a HashSet of all the Cipher UUID's which are marked as favorite - cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_uuid, conn).await.into_iter().collect(); + cipher_favorites = Favorite::get_all_cipher_uuid_by_user(user_uuid, conn) + .await + .into_iter() + .collect(); } // Organization Sync does not support Folders and Favorites. // If these are set, it will cause issues in the web-vault. 
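// The hunks that follow repeatedly group child rows under a parent UUID via
// the HashMap entry API. A self-contained sketch of that grouping pattern,
// using plain (String, String) pairs in place of the Attachment and
// Collection rows the real code collects:
use std::collections::HashMap;

fn group_by_parent(rows: Vec<(String, String)>) -> HashMap<String, Vec<String>> {
    let mut grouped: HashMap<String, Vec<String>> = HashMap::new();
    for (parent_uuid, child) in rows {
        // `or_default` inserts an empty Vec the first time a key is seen,
        // then every row with that key is pushed onto the same Vec.
        grouped.entry(parent_uuid).or_default().push(child);
    }
    grouped
}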
@@ -1773,44 +1963,61 @@ impl CipherSyncData { // Generate a list of Cipher UUID's containing a Vec with one or more Attachment records let user_org_uuids = UserOrganization::get_org_uuid_by_user(user_uuid, conn).await; - let attachments = Attachment::find_all_by_user_and_orgs(user_uuid, &user_org_uuids, conn).await; - let mut cipher_attachments: HashMap<String, Vec<Attachment>> = HashMap::with_capacity(attachments.len()); + let attachments = + Attachment::find_all_by_user_and_orgs(user_uuid, &user_org_uuids, conn).await; + let mut cipher_attachments: HashMap<String, Vec<Attachment>> = + HashMap::with_capacity(attachments.len()); for attachment in attachments { - cipher_attachments.entry(attachment.cipher_uuid.clone()).or_default().push(attachment); + cipher_attachments + .entry(attachment.cipher_uuid.clone()) + .or_default() + .push(attachment); } // Generate a HashMap with the Cipher UUID as key and one or more Collection UUID's - let user_cipher_collections = Cipher::get_collections_with_cipher_by_user(user_uuid.to_string(), conn).await; + let user_cipher_collections = + Cipher::get_collections_with_cipher_by_user(user_uuid.to_string(), conn).await; let mut cipher_collections: HashMap<String, Vec<String>> = HashMap::with_capacity(user_cipher_collections.len()); for (cipher, collection) in user_cipher_collections { - cipher_collections.entry(cipher).or_default().push(collection); + cipher_collections + .entry(cipher) + .or_default() + .push(collection); } // Generate a HashMap with the Organization UUID as key and the UserOrganization record - let user_organizations: HashMap<String, UserOrganization> = UserOrganization::find_by_user(user_uuid, conn) - .await - .into_iter() - .map(|uo| (uo.org_uuid.clone(), uo)) - .collect(); + let user_organizations: HashMap<String, UserOrganization> = + UserOrganization::find_by_user(user_uuid, conn) + .await + .into_iter() + .map(|uo| (uo.org_uuid.clone(), uo)) + .collect(); // Generate a HashMap with the User_Collections UUID as key and the CollectionUser record - let user_collections: HashMap<String, CollectionUser> = CollectionUser::find_by_user(user_uuid, conn) - .await - .into_iter() - .map(|uc| (uc.collection_uuid.clone(), uc)) - .collect(); + let user_collections: HashMap<String, CollectionUser> = + CollectionUser::find_by_user(user_uuid, conn) + .await + .into_iter() + .map(|uc| (uc.collection_uuid.clone(), uc)) + .collect(); // Generate a HashMap with the collections_uuid as key and the CollectionGroup record - let user_collections_groups: HashMap<String, CollectionGroup> = CollectionGroup::find_by_user(user_uuid, conn) - .await - .into_iter() - .map(|collection_group| (collection_group.collections_uuid.clone(), collection_group)) - .collect(); + let user_collections_groups: HashMap<String, CollectionGroup> = + CollectionGroup::find_by_user(user_uuid, conn) + .await + .into_iter() + .map(|collection_group| { + (collection_group.collections_uuid.clone(), collection_group) + }) + .collect(); // Get all organizations that the user has full access to via group assignment let user_group_full_access_for_organizations: HashSet<String> = - Group::gather_user_organizations_full_access(user_uuid, conn).await.into_iter().collect(); + Group::gather_user_organizations_full_access(user_uuid, conn) + .await + .into_iter() + .collect(); Self { cipher_attachments, diff --git a/src/api/core/emergency_access.rs b/src/api/core/emergency_access.rs @@ -1,4 +1,4 @@ -use chrono::{Duration, Utc}; +use chrono::Utc; use rocket::{serde::json::Json, Route}; use 
serde_json::Value; @@ -8,7 +8,7 @@ use crate::{ EmptyResult, JsonResult, JsonUpcase, NumberOrString, }, auth::{decode_emergency_access_invite, Headers}, - db::{models::*, DbConn, DbPool}, + db::{models::*, DbConn}, mail, CONFIG, }; @@ -40,7 +40,8 @@ pub fn routes() -> Vec<Route> { async fn get_contacts(headers: Headers, mut conn: DbConn) -> JsonResult { check_emergency_access_allowed()?; - let emergency_access_list = EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await; + let emergency_access_list = + EmergencyAccess::find_all_by_grantor_uuid(&headers.user.uuid, &mut conn).await; let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len()); for ea in emergency_access_list { emergency_access_list_json.push(ea.to_json_grantee_details(&mut conn).await); @@ -57,7 +58,8 @@ async fn get_contacts(headers: Headers, mut conn: DbConn) -> JsonResult { async fn get_grantees(headers: Headers, mut conn: DbConn) -> JsonResult { check_emergency_access_allowed()?; - let emergency_access_list = EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await; + let emergency_access_list = + EmergencyAccess::find_all_by_grantee_uuid(&headers.user.uuid, &mut conn).await; let mut emergency_access_list_json = Vec::with_capacity(emergency_access_list.len()); for ea in emergency_access_list { emergency_access_list_json.push(ea.to_json_grantor_details(&mut conn).await); @@ -75,7 +77,9 @@ async fn get_emergency_access(emer_id: &str, mut conn: DbConn) -> JsonResult { check_emergency_access_allowed()?; match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await { - Some(emergency_access) => Ok(Json(emergency_access.to_json_grantee_details(&mut conn).await)), + Some(emergency_access) => Ok(Json( + emergency_access.to_json_grantee_details(&mut conn).await, + )), None => err!("Emergency access not valid."), } } @@ -93,7 +97,11 @@ struct EmergencyAccessUpdateData { } #[put("/emergency-access/<emer_id>", data = "<data>")] -async fn put_emergency_access(emer_id: &str, data: JsonUpcase<EmergencyAccessUpdateData>, conn: DbConn) -> JsonResult { +async fn put_emergency_access( + emer_id: &str, + data: JsonUpcase<EmergencyAccessUpdateData>, + conn: DbConn, +) -> JsonResult { post_emergency_access(emer_id, data, conn).await } @@ -139,7 +147,9 @@ async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await { Some(emer) => { - if emer.grantor_uuid != grantor_user.uuid && emer.grantee_uuid != Some(grantor_user.uuid) { + if emer.grantor_uuid != grantor_user.uuid + && emer.grantee_uuid != Some(grantor_user.uuid) + { err!("Emergency access not valid.") } emer @@ -151,7 +161,11 @@ async fn delete_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo } #[post("/emergency-access/<emer_id>/delete")] -async fn post_delete_emergency_access(emer_id: &str, headers: Headers, conn: DbConn) -> EmptyResult { +async fn post_delete_emergency_access( + emer_id: &str, + headers: Headers, + conn: DbConn, +) -> EmptyResult { delete_emergency_access(emer_id, headers, conn).await } @@ -168,7 +182,11 @@ struct EmergencyAccessInviteData { } #[post("/emergency-access/invite", data = "<data>")] -async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn send_invite( + data: JsonUpcase<EmergencyAccessInviteData>, + headers: Headers, + mut conn: DbConn, +) -> EmptyResult { check_emergency_access_allowed()?; let 
data: EmergencyAccessInviteData = data.into_inner().data; @@ -220,16 +238,26 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade .await .is_some() { - err!(format!("Grantee user already invited: {}", &grantee_user.email)) - } - - let mut new_emergency_access = - EmergencyAccess::new(grantor_user.uuid, grantee_user.email, emergency_access_status, new_type, wait_time_days); + err!(format!( + "Grantee user already invited: {}", + &grantee_user.email + )) + } + + let mut new_emergency_access = EmergencyAccess::new( + grantor_user.uuid, + grantee_user.email, + emergency_access_status, + new_type, + wait_time_days, + ); new_emergency_access.save(&mut conn).await?; if CONFIG.mail_enabled() { mail::send_emergency_access_invite( - &new_emergency_access.email.expect("Grantee email does not exists"), + &new_emergency_access + .email + .expect("Grantee email does not exists"), &grantee_user.uuid, &new_emergency_access.uuid, &grantor_user.name, @@ -239,7 +267,14 @@ async fn send_invite(data: JsonUpcase<EmergencyAccessInviteData>, headers: Heade } else { // Automatically mark user as accepted if no email invites match User::find_by_mail(&email, &mut conn).await { - Some(user) => match accept_invite_process(&user.uuid, &mut new_emergency_access, &email, &mut conn).await { + Some(user) => match accept_invite_process( + &user.uuid, + &mut new_emergency_access, + &email, + &mut conn, + ) + .await + { Ok(v) => v, Err(e) => err!(e.to_string()), }, @@ -295,7 +330,9 @@ async fn resend_invite(emer_id: &str, headers: Headers, mut conn: DbConn) -> Emp } // Automatically mark user as accepted if no email invites - match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn).await { + match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &email, &mut conn) + .await + { Ok(v) => v, Err(e) => err!(e.to_string()), } @@ -311,7 +348,12 @@ struct AcceptData { } #[post("/emergency-access/<emer_id>/accept", data = "<data>")] -async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn accept_invite( + emer_id: &str, + data: JsonUpcase<AcceptData>, + headers: Headers, + mut conn: DbConn, +) -> EmptyResult { check_emergency_access_allowed()?; let data: AcceptData = data.into_inner().data; @@ -347,13 +389,21 @@ async fn accept_invite(emer_id: &str, data: JsonUpcase<AcceptData>, headers: Hea && grantor_user.name == claims.grantor_name && grantor_user.email == claims.grantor_email { - match accept_invite_process(&grantee_user.uuid, &mut emergency_access, &grantee_user.email, &mut conn).await { + match accept_invite_process( + &grantee_user.uuid, + &mut emergency_access, + &grantee_user.email, + &mut conn, + ) + .await + { Ok(v) => v, Err(e) => err!(e.to_string()), } if CONFIG.mail_enabled() { - mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email).await?; + mail::send_emergency_access_invite_accepted(&grantor_user.email, &grantee_user.email) + .await?; } Ok(()) @@ -368,7 +418,8 @@ async fn accept_invite_process( grantee_email: &str, conn: &mut DbConn, ) -> EmptyResult { - if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email { + if emergency_access.email.is_none() || emergency_access.email.as_ref().unwrap() != grantee_email + { err!("User email does not match invite."); } @@ -430,7 +481,8 @@ async fn confirm_emergency_access( emergency_access.save(&mut conn).await?; if CONFIG.mail_enabled() { - 
mail::send_emergency_access_invite_confirmed(&grantee_user.email, &grantor_user.name).await?; + mail::send_emergency_access_invite_confirmed(&grantee_user.email, &grantor_user.name) + .await?; } Ok(Json(emergency_access.to_json())) } else { @@ -443,7 +495,11 @@ async fn confirm_emergency_access( // region access emergency access #[post("/emergency-access/<emer_id>/initiate")] -async fn initiate_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn initiate_emergency_access( + emer_id: &str, + headers: Headers, + mut conn: DbConn, +) -> JsonResult { check_emergency_access_allowed()?; let initiating_user = headers.user; @@ -512,7 +568,8 @@ async fn approve_emergency_access(emer_id: &str, headers: Headers, mut conn: DbC emergency_access.save(&mut conn).await?; if CONFIG.mail_enabled() { - mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name).await?; + mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name) + .await?; } Ok(Json(emergency_access.to_json())) } else { @@ -551,7 +608,8 @@ async fn reject_emergency_access(emer_id: &str, headers: Headers, mut conn: DbCo emergency_access.save(&mut conn).await?; if CONFIG.mail_enabled() { - mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name).await?; + mail::send_emergency_access_recovery_rejected(&grantee_user.email, &grantor_user.name) + .await?; } Ok(Json(emergency_access.to_json())) } else { @@ -572,12 +630,21 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn None => err!("Emergency access not valid."), }; - if !is_valid_request(&emergency_access, &headers.user.uuid, EmergencyAccessType::View) { + if !is_valid_request( + &emergency_access, + &headers.user.uuid, + EmergencyAccessType::View, + ) { err!("Emergency access not valid.") } let ciphers = Cipher::find_owned_by_user(&emergency_access.grantor_uuid, &mut conn).await; - let cipher_sync_data = CipherSyncData::new(&emergency_access.grantor_uuid, CipherSyncType::User, &mut conn).await; + let cipher_sync_data = CipherSyncData::new( + &emergency_access.grantor_uuid, + CipherSyncType::User, + &mut conn, + ) + .await; let mut ciphers_json = Vec::with_capacity(ciphers.len()); for c in ciphers { @@ -601,7 +668,11 @@ async fn view_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn } #[post("/emergency-access/<emer_id>/takeover")] -async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn takeover_emergency_access( + emer_id: &str, + headers: Headers, + mut conn: DbConn, +) -> JsonResult { check_emergency_access_allowed()?; let requesting_user = headers.user; @@ -610,7 +681,11 @@ async fn takeover_emergency_access(emer_id: &str, headers: Headers, mut conn: Db None => err!("Emergency access not valid."), }; - if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) { + if !is_valid_request( + &emergency_access, + &requesting_user.uuid, + EmergencyAccessType::Takeover, + ) { err!("Emergency access not valid.") } @@ -657,11 +732,16 @@ async fn password_emergency_access( None => err!("Emergency access not valid."), }; - if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) { + if !is_valid_request( + &emergency_access, + &requesting_user.uuid, + EmergencyAccessType::Takeover, + ) { err!("Emergency access not valid.") } - let mut grantor_user = match 
User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await { + let mut grantor_user = match User::find_by_uuid(&emergency_access.grantor_uuid, &mut conn).await + { Some(user) => user, None => err!("Grantor user not found."), }; @@ -685,14 +765,22 @@ async fn password_emergency_access( // endregion #[get("/emergency-access/<emer_id>/policies")] -async fn policies_emergency_access(emer_id: &str, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn policies_emergency_access( + emer_id: &str, + headers: Headers, + mut conn: DbConn, +) -> JsonResult { let requesting_user = headers.user; let emergency_access = match EmergencyAccess::find_by_uuid(emer_id, &mut conn).await { Some(emer) => emer, None => err!("Emergency access not valid."), }; - if !is_valid_request(&emergency_access, &requesting_user.uuid, EmergencyAccessType::Takeover) { + if !is_valid_request( + &emergency_access, + &requesting_user.uuid, + EmergencyAccessType::Takeover, + ) { err!("Emergency access not valid.") } @@ -728,117 +816,3 @@ fn check_emergency_access_allowed() -> EmptyResult { } Ok(()) } - -pub async fn emergency_request_timeout_job(pool: DbPool) { - debug!("Start emergency_request_timeout_job"); - if !CONFIG.emergency_access_allowed() { - return; - } - - if let Ok(mut conn) = pool.get().await { - let emergency_access_list = EmergencyAccess::find_all_recoveries_initiated(&mut conn).await; - - if emergency_access_list.is_empty() { - debug!("No emergency request timeout to approve"); - } - - let now = Utc::now().naive_utc(); - for mut emer in emergency_access_list { - // The find_all_recoveries_initiated already checks if the recovery_initiated_at is not null (None) - let recovery_allowed_at = - emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days)); - if recovery_allowed_at.le(&now) { - // Only update the access status - // Updating the whole record could cause issues when the emergency_notification_reminder_job is also active - emer.update_access_status_and_save(EmergencyAccessStatus::RecoveryApproved as i32, &now, &mut conn) - .await - .expect("Unable to update emergency access status"); - - if CONFIG.mail_enabled() { - // get grantor user to send Accepted email - let grantor_user = - User::find_by_uuid(&emer.grantor_uuid, &mut conn).await.expect("Grantor user not found"); - - // get grantee user to send Accepted email - let grantee_user = - User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid"), &mut conn) - .await - .expect("Grantee user not found"); - - mail::send_emergency_access_recovery_timed_out( - &grantor_user.email, - &grantee_user.name, - emer.get_type_as_str(), - ) - .await - .expect("Error on sending email"); - - mail::send_emergency_access_recovery_approved(&grantee_user.email, &grantor_user.name) - .await - .expect("Error on sending email"); - } - } - } - } else { - error!("Failed to get DB connection while searching emergency request timed out") - } -} - -pub async fn emergency_notification_reminder_job(pool: DbPool) { - debug!("Start emergency_notification_reminder_job"); - if !CONFIG.emergency_access_allowed() { - return; - } - - if let Ok(mut conn) = pool.get().await { - let emergency_access_list = EmergencyAccess::find_all_recoveries_initiated(&mut conn).await; - - if emergency_access_list.is_empty() { - debug!("No emergency request reminder notification to send"); - } - - let now = Utc::now().naive_utc(); - for mut emer in emergency_access_list { - // The find_all_recoveries_initiated already checks if the recovery_initiated_at 
is not null (None) - // Calculate the day before the recovery will become active - let final_recovery_reminder_at = - emer.recovery_initiated_at.unwrap() + Duration::days(i64::from(emer.wait_time_days - 1)); - // Calculate if a day has passed since the previous notification, else no notification has been sent before - let next_recovery_reminder_at = if let Some(last_notification_at) = emer.last_notification_at { - last_notification_at + Duration::days(1) - } else { - now - }; - if final_recovery_reminder_at.le(&now) && next_recovery_reminder_at.le(&now) { - // Only update the last notification date - // Updating the whole record could cause issues when the emergency_request_timeout_job is also active - emer.update_last_notification_date_and_save(&now, &mut conn) - .await - .expect("Unable to update emergency access notification date"); - - if CONFIG.mail_enabled() { - // get grantor user to send Accepted email - let grantor_user = - User::find_by_uuid(&emer.grantor_uuid, &mut conn).await.expect("Grantor user not found"); - - // get grantee user to send Accepted email - let grantee_user = - User::find_by_uuid(&emer.grantee_uuid.clone().expect("Grantee user invalid"), &mut conn) - .await - .expect("Grantee user not found"); - - mail::send_emergency_access_recovery_reminder( - &grantor_user.email, - &grantee_user.name, - emer.get_type_as_str(), - "1", // This notification is only triggered one day before the activation - ) - .await - .expect("Error on sending email"); - } - } - } - } else { - error!("Failed to get DB connection while searching emergency notification reminder") - } -} diff --git a/src/api/core/events.rs b/src/api/core/events.rs @@ -1,19 +1,10 @@ -use std::net::IpAddr; - -use chrono::NaiveDateTime; -use rocket::{form::FromForm, serde::json::Json, Route}; -use serde_json::Value; - use crate::{ api::{EmptyResult, JsonResult, JsonUpcaseVec}, auth::{AdminHeaders, Headers}, - db::{ - models::{Cipher, Event, UserOrganization}, - DbConn, DbPool, - }, - util::parse_date, - CONFIG, + db::{models::Event, DbConn}, }; +use rocket::{form::FromForm, serde::json::Json, Route}; +use serde_json::Value; /// ############################################################################################################### /// /api routes @@ -22,7 +13,7 @@ pub fn routes() -> Vec<Route> { } #[derive(FromForm)] -#[allow(non_snake_case)] +#[allow(non_snake_case, dead_code)] struct EventRange { start: String, end: String, @@ -31,27 +22,17 @@ struct EventRange { } // Upstream: https://github.com/bitwarden/server/blob/9ecf69d9cabce732cf2c57976dd9afa5728578fb/src/Api/Controllers/EventsController.cs#LL84C35-L84C41 +#[allow(unused_variables)] #[get("/organizations/<org_id>/events?<data..>")] -async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult { +fn get_org_events( + org_id: &str, + data: EventRange, + _headers: AdminHeaders, + _conn: DbConn, +) -> JsonResult { // Return an empty vec when we org events are disabled. 
     // This prevents client errors
-    let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
-        Vec::with_capacity(0)
-    } else {
-        let start_date = parse_date(&data.start);
-        let end_date = if let Some(before_date) = &data.continuation_token {
-            parse_date(before_date)
-        } else {
-            parse_date(&data.end)
-        };
-
-        Event::find_by_organization_uuid(org_id, &start_date, &end_date, &mut conn)
-            .await
-            .iter()
-            .map(|e| e.to_json())
-            .collect()
-    };
-
+    let events_json = Vec::new();
     Ok(Json(json!({
         "Data": events_json,
         "Object": "list",
@@ -59,31 +40,17 @@ async fn get_org_events(org_id: &str, data: EventRange, _headers: AdminHeaders,
     })))
 }

+#[allow(unused_variables)]
 #[get("/ciphers/<cipher_id>/events?<data..>")]
-async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers, mut conn: DbConn) -> JsonResult {
+fn get_cipher_events(
+    cipher_id: &str,
+    data: EventRange,
+    _headers: Headers,
+    _conn: DbConn,
+) -> JsonResult {
     // Return an empty vec when org events are disabled.
     // This prevents client errors
-    let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
-        Vec::with_capacity(0)
-    } else {
-        let mut events_json = Vec::with_capacity(0);
-        if UserOrganization::user_has_ge_admin_access_to_cipher(&headers.user.uuid, cipher_id, &mut conn).await {
-            let start_date = parse_date(&data.start);
-            let end_date = if let Some(before_date) = &data.continuation_token {
-                parse_date(before_date)
-            } else {
-                parse_date(&data.end)
-            };
-
-            events_json = Event::find_by_cipher_uuid(cipher_id, &start_date, &end_date, &mut conn)
-                .await
-                .iter()
-                .map(|e| e.to_json())
-                .collect()
-        }
-        events_json
-    };
-
+    let events_json = Vec::new();
     Ok(Json(json!({
         "Data": events_json,
         "Object": "list",
@@ -91,33 +58,18 @@ async fn get_cipher_events(cipher_id: &str, data: EventRange, headers: Headers,
     })))
 }

+#[allow(unused_variables)]
 #[get("/organizations/<org_id>/users/<user_org_id>/events?<data..>")]
-async fn get_user_events(
+fn get_user_events(
     org_id: &str,
     user_org_id: &str,
     data: EventRange,
     _headers: AdminHeaders,
-    mut conn: DbConn,
+    _conn: DbConn,
 ) -> JsonResult {
     // Return an empty vec when org events are disabled.
     // This prevents client errors
-    let events_json: Vec<Value> = if !CONFIG.org_events_enabled() {
-        Vec::with_capacity(0)
-    } else {
-        let start_date = parse_date(&data.start);
-        let end_date = if let Some(before_date) = &data.continuation_token {
-            parse_date(before_date)
-        } else {
-            parse_date(&data.end)
-        };
-
-        Event::find_by_org_and_user_org(org_id, user_org_id, &start_date, &end_date, &mut conn)
-            .await
-            .iter()
-            .map(|e| e.to_json())
-            .collect()
-    };
-
+    let events_json: Vec<Value> = Vec::new();
     Ok(Json(json!({
         "Data": events_json,
         "Object": "list",
@@ -147,190 +99,17 @@ pub fn main_routes() -> Vec<Route> {

 #[derive(Deserialize, Debug)]
 #[allow(non_snake_case)]
-struct EventCollection {
-    // Mandatory
-    Type: i32,
-    Date: String,
-
-    // Optional
-    CipherId: Option<String>,
-    OrganizationId: Option<String>,
-}
+struct EventCollection;

 // Upstream:
 // https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Events/Controllers/CollectController.cs
 // https://github.com/bitwarden/server/blob/8a22c0479e987e756ce7412c48a732f9002f0a2d/src/Core/Services/Implementations/EventService.cs
+#[allow(unused_variables)]
 #[post("/collect", format = "application/json", data = "<data>")]
-async fn post_events_collect(data: JsonUpcaseVec<EventCollection>, headers: Headers, mut conn: DbConn) -> EmptyResult {
-    if !CONFIG.org_events_enabled() {
-        return Ok(());
-    }
-
-    for event in data.iter().map(|d| &d.data) {
-        let event_date = parse_date(&event.Date);
-        match event.Type {
-            1000..=1099 => {
-                _log_user_event(
-                    event.Type,
-                    &headers.user.uuid,
-                    headers.device.atype,
-                    Some(event_date),
-                    &headers.ip.ip,
-                    &mut conn,
-                )
-                .await;
-            }
-            1600..=1699 => {
-                if let Some(org_uuid) = &event.OrganizationId {
-                    _log_event(
-                        event.Type,
-                        org_uuid,
-                        org_uuid,
-                        &headers.user.uuid,
-                        headers.device.atype,
-                        Some(event_date),
-                        &headers.ip.ip,
-                        &mut conn,
-                    )
-                    .await;
-                }
-            }
-            _ => {
-                if let Some(cipher_uuid) = &event.CipherId {
-                    if let Some(cipher) = Cipher::find_by_uuid(cipher_uuid, &mut conn).await {
-                        if let Some(org_uuid) = cipher.organization_uuid {
-                            _log_event(
-                                event.Type,
-                                cipher_uuid,
-                                &org_uuid,
-                                &headers.user.uuid,
-                                headers.device.atype,
-                                Some(event_date),
-                                &headers.ip.ip,
-                                &mut conn,
-                            )
-                            .await;
-                        }
-                    }
-                }
-            }
-        }
-    }
+fn post_events_collect(
+    data: JsonUpcaseVec<EventCollection>,
+    _headers: Headers,
+    _conn: DbConn,
+) -> EmptyResult {
     Ok(())
 }
-
-pub async fn log_user_event(event_type: i32, user_uuid: &str, device_type: i32, ip: &IpAddr, conn: &mut DbConn) {
-    if !CONFIG.org_events_enabled() {
-        return;
-    }
-    _log_user_event(event_type, user_uuid, device_type, None, ip, conn).await;
-}
-
-async fn _log_user_event(
-    event_type: i32,
-    user_uuid: &str,
-    device_type: i32,
-    event_date: Option<NaiveDateTime>,
-    ip: &IpAddr,
-    conn: &mut DbConn,
-) {
-    let orgs = UserOrganization::get_org_uuid_by_user(user_uuid, conn).await;
-    let mut events: Vec<Event> = Vec::with_capacity(orgs.len() + 1); // We need an event per org and one without an org
-
-    // Upstream saves the event also without any org_uuid.
-    let mut event = Event::new(event_type, event_date);
-    event.user_uuid = Some(String::from(user_uuid));
-    event.act_user_uuid = Some(String::from(user_uuid));
-    event.device_type = Some(device_type);
-    event.ip_address = Some(ip.to_string());
-    events.push(event);
-
-    // For each org a user is a member of store these events per org
-    for org_uuid in orgs {
-        let mut event = Event::new(event_type, event_date);
-        event.user_uuid = Some(String::from(user_uuid));
-        event.org_uuid = Some(org_uuid);
-        event.act_user_uuid = Some(String::from(user_uuid));
-        event.device_type = Some(device_type);
-        event.ip_address = Some(ip.to_string());
-        events.push(event);
-    }
-
-    Event::save_user_event(events, conn).await.unwrap_or(());
-}
-
-pub async fn log_event(
-    event_type: i32,
-    source_uuid: &str,
-    org_uuid: &str,
-    act_user_uuid: String,
-    device_type: i32,
-    ip: &IpAddr,
-    conn: &mut DbConn,
-) {
-    if !CONFIG.org_events_enabled() {
-        return;
-    }
-    _log_event(event_type, source_uuid, org_uuid, &act_user_uuid, device_type, None, ip, conn).await;
-}
-
-#[allow(clippy::too_many_arguments)]
-async fn _log_event(
-    event_type: i32,
-    source_uuid: &str,
-    org_uuid: &str,
-    act_user_uuid: &str,
-    device_type: i32,
-    event_date: Option<NaiveDateTime>,
-    ip: &IpAddr,
-    conn: &mut DbConn,
-) {
-    // Create a new empty event
-    let mut event = Event::new(event_type, event_date);
-    match event_type {
-        // 1000..=1099 Are user events, they need to be logged via log_user_event()
-        // Collection Events
-        1100..=1199 => {
-            event.cipher_uuid = Some(String::from(source_uuid));
-        }
-        // Collection Events
-        1300..=1399 => {
-            event.collection_uuid = Some(String::from(source_uuid));
-        }
-        // Group Events
-        1400..=1499 => {
-            event.group_uuid = Some(String::from(source_uuid));
-        }
-        // Org User Events
-        1500..=1599 => {
-            event.org_user_uuid = Some(String::from(source_uuid));
-        }
-        // 1600..=1699 Are organizational events, and they do not need the source_uuid
-        // Policy Events
-        1700..=1799 => {
-            event.policy_uuid = Some(String::from(source_uuid));
-        }
-        // Ignore others
-        _ => {}
-    }
-
-    event.org_uuid = Some(String::from(org_uuid));
-    event.act_user_uuid = Some(String::from(act_user_uuid));
-    event.device_type = Some(device_type);
-    event.ip_address = Some(ip.to_string());
-    event.save(conn).await.unwrap_or(());
-}
-
-pub async fn event_cleanup_job(pool: DbPool) {
-    debug!("Start events cleanup job");
-    if CONFIG.events_days_retain().is_none() {
-        debug!("events_days_retain is not configured, abort");
-        return;
-    }
-
-    if let Ok(mut conn) = pool.get().await {
-        Event::clean_events(&mut conn).await.ok();
-    } else {
-        error!("Failed to get DB connection while trying to cleanup the events table")
-    }
-}
diff --git a/src/api/core/mod.rs b/src/api/core/mod.rs
@@ -7,14 +7,7 @@ mod organizations;
 mod public;
 mod sends;
 pub mod two_factor;
-
-pub use accounts::purge_auth_requests;
-pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
-pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
-pub use events::{event_cleanup_job, log_event, log_user_event};
-pub use sends::purge_sends;
-pub use two_factor::send_incomplete_2fa_notifications;
-
+pub use ciphers::{CipherData, CipherSyncData, CipherSyncType};
 pub fn routes() -> Vec<Route> {
     let mut eq_domains_routes = routes![get_eq_domains, post_eq_domains, put_eq_domains];
     let mut hibp_routes = routes![hibp_breach];
@@ -51,11 +44,9 @@
 use rocket::{serde::json::Json, Catcher, Route};
 use serde_json::Value;
 use crate::{
-    api::{JsonResult, JsonUpcase, Notify, UpdateType},
+    api::{JsonResult, JsonUpcase, Notify},
     auth::Headers,
     db::DbConn,
-    error::Error,
-    util::get_reqwest_client,
 };

 #[derive(Serialize, Deserialize, Debug)]
@@ -109,7 +100,7 @@ async fn post_eq_domains(
     data: JsonUpcase<EquivDomainData>,
     headers: Headers,
     mut conn: DbConn,
-    nt: Notify<'_>,
+    _nt: Notify<'_>,
 ) -> JsonResult {
     let data: EquivDomainData = data.into_inner().data;
@@ -123,9 +114,6 @@ async fn post_eq_domains(
     user.equivalent_domains = to_string(&equivalent_domains).unwrap_or_else(|_| "[]".to_string());
     user.save(&mut conn).await?;
-
-    nt.send_user_update(UpdateType::SyncSettings, &user).await;
-
     Ok(Json(json!({})))
 }
@@ -138,40 +126,10 @@ async fn put_eq_domains(
 ) -> JsonResult {
     post_eq_domains(data, headers, conn, nt).await
 }
-
+#[allow(unused_variables)]
 #[get("/hibp/breach?<username>")]
-async fn hibp_breach(username: &str) -> JsonResult {
-    let url = format!(
-        "https://haveibeenpwned.com/api/v3/breachedaccount/{username}?truncateResponse=false&includeUnverified=false"
-    );
-
-    if let Some(api_key) = crate::CONFIG.hibp_api_key() {
-        let hibp_client = get_reqwest_client();
-
-        let res = hibp_client.get(&url).header("hibp-api-key", api_key).send().await?;
-
-        // If we get a 404, return a 404, it means no breached accounts
-        if res.status() == 404 {
-            return Err(Error::empty().with_code(404));
-        }
-
-        let value: Value = res.error_for_status()?.json().await?;
-        Ok(Json(value))
-    } else {
-        Ok(Json(json!([{
-            "Name": "HaveIBeenPwned",
-            "Title": "Manual HIBP Check",
-            "Domain": "haveibeenpwned.com",
-            "BreachDate": "2019-08-18T00:00:00Z",
-            "AddedDate": "2019-08-18T00:00:00Z",
-            "Description": format!("Go to: <a href=\"https://haveibeenpwned.com/account/{username}\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/account/{username}</a> for a manual check.<br/><br/>HaveIBeenPwned API key not set!<br/>Go to <a href=\"https://haveibeenpwned.com/API/Key\" target=\"_blank\" rel=\"noreferrer\">https://haveibeenpwned.com/API/Key</a> to purchase an API key from HaveIBeenPwned.<br/><br/>"),
-            "LogoPath": "vw_static/hibp.png",
-            "PwnCount": 0,
-            "DataClasses": [
-                "Error - No API key set!"
-            ]
-        }])))
-    }
+fn hibp_breach(username: &str) -> JsonResult {
+    Err(crate::error::Error::empty().with_code(404))
 }

 // We use DbConn here to let the alive healthcheck also verify the database connection.
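The hardened hibp_breach above illustrates the stub pattern this commit applies throughout: the route keeps its public path and signature shape, loses its async-ness and unused inputs, and unconditionally returns a fixed response instead of calling out to haveibeenpwned.com. A minimal, self-contained model of that pattern (the Error and JsonResult definitions below are simplified stand-ins for this crate's real types, not the committed code):

struct Error {
    code: u16,
}

impl Error {
    fn empty() -> Self {
        Error { code: 500 }
    }

    fn with_code(mut self, code: u16) -> Self {
        self.code = code;
        self
    }
}

type JsonResult = Result<String, Error>;

// Like the stubbed hibp_breach: no outbound network request is ever made, and
// every username is reported as "no breached accounts" via a 404.
fn hibp_breach_stub(_username: &str) -> JsonResult {
    Err(Error::empty().with_code(404))
}

fn main() {
    assert_eq!(hibp_breach_stub("user@example.com").unwrap_err().code, 404);
}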
@@ -187,7 +145,7 @@ pub fn now() -> Json<String> {

 #[get("/version")]
 fn version() -> Json<&'static str> {
-    Json(crate::VERSION.unwrap_or_default())
+    Json(crate::VERSION)
 }

 #[get("/config")]
diff --git a/src/api/core/organizations.rs b/src/api/core/organizations.rs
@@ -1,20 +1,22 @@
-use num_traits::FromPrimitive;
-use rocket::serde::json::Json;
-use rocket::Route;
-use serde_json::Value;
-
 use crate::{
     api::{
-        core::{log_event, CipherSyncData, CipherSyncType},
-        EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString, PasswordData, UpdateType,
+        core::{CipherSyncData, CipherSyncType},
+        EmptyResult, JsonResult, JsonUpcase, JsonUpcaseVec, JsonVec, Notify, NumberOrString,
+        PasswordData, UpdateType,
+    },
+    auth::{
+        decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders,
     },
-    auth::{decode_invite, AdminHeaders, Headers, ManagerHeaders, ManagerHeadersLoose, OwnerHeaders},
     db::{models::*, DbConn},
     error::Error,
     mail,
     util::convert_json_key_lcase_first,
     CONFIG,
 };
+use num_traits::FromPrimitive;
+use rocket::serde::json::Json;
+use rocket::Route;
+use serde_json::Value;

 pub fn routes() -> Vec<Route> {
     routes![
@@ -149,11 +151,22 @@ struct OrgBulkIds {
 }

 #[post("/organizations", data = "<data>")]
-async fn create_organization(headers: Headers, data: JsonUpcase<OrgData>, mut conn: DbConn) -> JsonResult {
+async fn create_organization(
+    headers: Headers,
+    data: JsonUpcase<OrgData>,
+    mut conn: DbConn,
+) -> JsonResult {
     if !CONFIG.is_org_creation_allowed(&headers.user.email) {
         err!("User not allowed to create organizations")
     }
-    if OrgPolicy::is_applicable_to_user(&headers.user.uuid, OrgPolicyType::SingleOrg, None, &mut conn).await {
+    if OrgPolicy::is_applicable_to_user(
+        &headers.user.uuid,
+        OrgPolicyType::SingleOrg,
+        None,
+        &mut conn,
+    )
+    .await
+    {
         err!(
             "You may not create an organization. You belong to an organization which has a policy that prohibits you from being a member of any other organization."
         )
@@ -219,22 +232,16 @@ async fn leave_organization(org_id: &str, headers: Headers, mut conn: DbConn) ->
         None => err!("User not part of organization"),
         Some(user_org) => {
             if user_org.atype == UserOrgType::Owner
-                && UserOrganization::count_confirmed_by_org_and_type(org_id, UserOrgType::Owner, &mut conn).await <= 1
+                && UserOrganization::count_confirmed_by_org_and_type(
+                    org_id,
+                    UserOrgType::Owner,
+                    &mut conn,
+                )
+                .await
+                    <= 1
             {
                 err!("The last owner can't leave")
             }
-
-            log_event(
-                EventType::OrganizationUserRemoved as i32,
-                &user_org.uuid,
-                org_id,
-                headers.user.uuid.clone(),
-                headers.device.atype,
-                &headers.ip.ip,
-                &mut conn,
-            )
-            .await;
-
             user_org.delete(&mut conn).await
         }
     }
@@ -261,7 +268,7 @@ async fn put_organization(

 #[post("/organizations/<org_id>", data = "<data>")]
 async fn post_organization(
     org_id: &str,
-    headers: OwnerHeaders,
+    _headers: OwnerHeaders,
     data: JsonUpcase<OrganizationUpdateData>,
     mut conn: DbConn,
 ) -> JsonResult {
@@ -271,23 +278,9 @@ async fn post_organization(
         Some(organization) => organization,
         None => err!("Can't find organization details"),
     };
-
     org.name = data.Name;
     org.billing_email = data.BillingEmail;
-
     org.save(&mut conn).await?;
-
-    log_event(
-        EventType::OrganizationUpdated as i32,
-        org_id,
-        org_id,
-        headers.user.uuid.clone(),
-        headers.device.atype,
-        &headers.ip.ip,
-        &mut conn,
-    )
-    .await;
-
     Ok(Json(org.to_json()))
 }
@@ -306,7 +299,11 @@ async fn get_user_collections(headers: Headers, mut conn: DbConn) -> Json<Value>
 }

 #[get("/organizations/<org_id>/collections")]
-async fn get_org_collections(org_id: &str, _headers: ManagerHeadersLoose, mut conn: DbConn) -> Json<Value> {
+async fn get_org_collections(
+    org_id: &str,
+    _headers: ManagerHeadersLoose,
+    mut conn: DbConn,
+) -> Json<Value> {
     Json(json!({
         "Data": _get_org_collections(org_id, &mut conn).await,
         "Object": "list",
@@ -315,31 +312,20 @@
 }

 #[get("/organizations/<org_id>/collections/details")]
-async fn get_org_collections_details(org_id: &str, headers: ManagerHeadersLoose, mut conn: DbConn) -> JsonResult {
+async fn get_org_collections_details(
+    org_id: &str,
+    headers: ManagerHeadersLoose,
+    mut conn: DbConn,
+) -> JsonResult {
     let mut data = Vec::new();
-
-    let user_org = match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await {
-        Some(u) => u,
-        None => err!("User is not part of organization"),
-    };
-
+    let user_org =
+        match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await {
+            Some(u) => u,
+            None => err!("User is not part of organization"),
+        };
     let coll_users = CollectionUser::find_by_organization(org_id, &mut conn).await;
-
     for col in Collection::find_by_organization(org_id, &mut conn).await {
-        let groups: Vec<Value> = if CONFIG.org_groups_enabled() {
-            CollectionGroup::find_by_collection(&col.uuid, &mut conn)
-                .await
-                .iter()
-                .map(|collection_group| {
-                    SelectionReadOnly::to_collection_group_details_read_only(collection_group).to_json()
-                })
-                .collect()
-        } else {
-            // The Bitwarden clients seem to call this API regardless of whether groups are enabled,
-            // so just act as if there are no groups.
-            Vec::with_capacity(0)
-        };
-
+        let groups: Vec<Value> = Vec::new();
         let mut assigned = false;
         let users: Vec<Value> = coll_users
             .iter()
@@ -374,7 +360,11 @@
 }

 async fn _get_org_collections(org_id: &str, conn: &mut DbConn) -> Value {
-    Collection::find_by_organization(org_id, conn).await.iter().map(Collection::to_json).collect::<Value>()
+    Collection::find_by_organization(org_id, conn)
+        .await
+        .iter()
+        .map(Collection::to_json)
+        .collect::<Value>()
 }

 #[post("/organizations/<org_id>/collections", data = "<data>")]
@@ -390,25 +380,17 @@
         Some(organization) => organization,
         None => err!("Can't find organization details"),
     };
-
     let collection = Collection::new(org.uuid, data.Name, data.ExternalId);
     collection.save(&mut conn).await?;
-
-    log_event(
-        EventType::CollectionCreated as i32,
-        &collection.uuid,
-        org_id,
-        headers.user.uuid.clone(),
-        headers.device.atype,
-        &headers.ip.ip,
-        &mut conn,
-    )
-    .await;
-
     for group in data.Groups {
-        CollectionGroup::new(collection.uuid.clone(), group.Id, group.ReadOnly, group.HidePasswords)
-            .save(&mut conn)
-            .await?;
+        CollectionGroup::new(
+            collection.uuid.clone(),
+            group.Id,
+            group.ReadOnly,
+            group.HidePasswords,
+        )
+        .save(&mut conn)
+        .await?;
     }

     for user in data.Users {
@@ -421,12 +403,25 @@
             continue;
         }

-        CollectionUser::save(&org_user.user_uuid, &collection.uuid, user.ReadOnly, user.HidePasswords, &mut conn)
-            .await?;
+        CollectionUser::save(
+            &org_user.user_uuid,
+            &collection.uuid,
+            user.ReadOnly,
+            user.HidePasswords,
+            &mut conn,
+        )
+        .await?;
     }

     if headers.org_user.atype == UserOrgType::Manager && !headers.org_user.access_all {
-        CollectionUser::save(&headers.org_user.user_uuid, &collection.uuid, false, false, &mut conn).await?;
+        CollectionUser::save(
+            &headers.org_user.user_uuid,
+            &collection.uuid,
+            false,
+            false,
+            &mut conn,
+        )
+        .await?;
     }

     Ok(Json(collection.to_json()))
@@ -447,7 +442,7 @@
 async fn post_organization_collection_update(
     org_id: &str,
     col_id: &str,
-    headers: ManagerHeaders,
+    _headers: ManagerHeaders,
     data: JsonUpcase<NewCollectionData>,
     mut conn: DbConn,
 ) -> JsonResult {
@@ -472,26 +467,18 @@
         Some(external_id) if !external_id.trim().is_empty() => Some(external_id),
         _ => None,
     };
-
     collection.save(&mut conn).await?;
-
-    log_event(
-        EventType::CollectionUpdated as i32,
-        &collection.uuid,
-        org_id,
-        headers.user.uuid.clone(),
-        headers.device.atype,
-        &headers.ip.ip,
-        &mut conn,
-    )
-    .await;
-
     CollectionGroup::delete_all_by_collection(col_id, &mut conn).await?;

     for group in data.Groups {
-        CollectionGroup::new(String::from(col_id), group.Id, group.ReadOnly, group.HidePasswords)
-            .save(&mut conn)
-            .await?;
+        CollectionGroup::new(
+            String::from(col_id),
+            group.Id,
+            group.ReadOnly,
+            group.HidePasswords,
+        )
+        .save(&mut conn)
+        .await?;
     }

     CollectionUser::delete_all_by_collection(col_id, &mut conn).await?;
@@ -506,7 +493,14 @@
             continue;
         }

-        CollectionUser::save(&org_user.user_uuid, col_id, user.ReadOnly, user.HidePasswords, &mut conn).await?;
+        CollectionUser::save(
+            &org_user.user_uuid,
+            col_id,
+            user.ReadOnly,
+            user.HidePasswords,
+            &mut conn,
+        )
+        .await?;
     }

     Ok(Json(collection.to_json()))
@@ -534,7 +528,13 @@ async fn delete_organization_collection_user(
     match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &mut conn).await {
         None => err!("User not found in organization"),
         Some(user_org) => {
-            match CollectionUser::find_by_collection_and_user(&collection.uuid, &user_org.user_uuid, &mut conn).await {
+            match CollectionUser::find_by_collection_and_user(
+                &collection.uuid,
+                &user_org.user_uuid,
+                &mut conn,
+            )
+            .await
+            {
                 None => err!("User not assigned to collection"),
                 Some(col_user) => col_user.delete(&mut conn).await,
             }
@@ -556,23 +556,13 @@ async fn post_organization_collection_delete_user(
 async fn _delete_organization_collection(
     org_id: &str,
     col_id: &str,
-    headers: &ManagerHeaders,
+    _headers: &ManagerHeaders,
     conn: &mut DbConn,
 ) -> EmptyResult {
     match Collection::find_by_uuid(col_id, conn).await {
         None => err!("Collection not found"),
         Some(collection) => {
             if collection.org_uuid == org_id {
-                log_event(
-                    EventType::CollectionDeleted as i32,
-                    &collection.uuid,
-                    org_id,
-                    headers.user.uuid.clone(),
-                    headers.device.atype,
-                    &headers.ip.ip,
-                    conn,
-                )
-                .await;
                 collection.delete(conn).await
             } else {
                 err!("Collection and Organization id do not match")
@@ -598,7 +588,10 @@ struct DeleteCollectionData {
     OrgId: String,
 }

-#[post("/organizations/<org_id>/collections/<col_id>/delete", data = "<_data>")]
+#[post(
+    "/organizations/<org_id>/collections/<col_id>/delete",
+    data = "<_data>"
+)]
 async fn post_organization_collection_delete(
     org_id: &str,
     col_id: &str,
@@ -652,39 +645,33 @@
         err!("Collection is not owned by organization")
     }

-    let user_org = match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await {
-        Some(u) => u,
-        None => err!("User is not part of organization"),
-    };
-
-    let groups: Vec<Value> = if CONFIG.org_groups_enabled() {
-        CollectionGroup::find_by_collection(&collection.uuid, &mut conn)
+    let user_org =
+        match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn)
             .await
-            .iter()
-            .map(|collection_group| {
-                SelectionReadOnly::to_collection_group_details_read_only(collection_group).to_json()
-            })
-            .collect()
-    } else {
-        // The Bitwarden clients seem to call this API regardless of whether groups are enabled,
-        // so just act as if there are no groups.
-        Vec::with_capacity(0)
-    };
+        {
+            Some(u) => u,
+            None => err!("User is not part of organization"),
+        };
+    let groups: Vec<Value> = Vec::new();

     let mut assigned = false;
     let users: Vec<Value> =
-        CollectionUser::find_by_collection_swap_user_uuid_with_org_user_uuid(&collection.uuid, &mut conn)
-            .await
-            .iter()
-            .map(|collection_user| {
-                // Remember `user_uuid` is swapped here with the `user_org.uuid` with a join during the `find_by_collection_swap_user_uuid_with_org_user_uuid` call.
-                // We check here if the current user is assigned to this collection or not.
-                if collection_user.user_uuid == user_org.uuid {
-                    assigned = true;
-                }
-                SelectionReadOnly::to_collection_user_details_read_only(collection_user).to_json()
-            })
-            .collect();
+        CollectionUser::find_by_collection_swap_user_uuid_with_org_user_uuid(
+            &collection.uuid,
+            &mut conn,
+        )
+        .await
+        .iter()
+        .map(|collection_user| {
+            // Remember `user_uuid` is swapped here with the `user_org.uuid` with a join during the `find_by_collection_swap_user_uuid_with_org_user_uuid` call.
+            // We check here if the current user is assigned to this collection or not.
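+            // (That is: each returned `collection_user.user_uuid` already holds a
+            // `users_organizations` row uuid after the join, which is why it is
+            // compared against `user_org.uuid` rather than a plain user uuid.)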
+            if collection_user.user_uuid == user_org.uuid {
+                assigned = true;
+            }
+            SelectionReadOnly::to_collection_user_details_read_only(collection_user)
+                .to_json()
+        })
+        .collect();

     if user_org.access_all {
         assigned = true;
@@ -702,7 +689,12 @@
 }

 #[get("/organizations/<org_id>/collections/<coll_id>/users")]
-async fn get_collection_users(org_id: &str, coll_id: &str, _headers: ManagerHeaders, mut conn: DbConn) -> JsonResult {
+async fn get_collection_users(
+    org_id: &str,
+    coll_id: &str,
+    _headers: ManagerHeaders,
+    mut conn: DbConn,
+) -> JsonResult {
     // Get org and collection, check that collection is from org
     let collection = match Collection::find_by_uuid_and_org(coll_id, org_id, &mut conn).await {
         None => err!("Collection not found in Organization"),
@@ -731,7 +723,10 @@
     mut conn: DbConn,
 ) -> EmptyResult {
     // Get org and collection, check that collection is from org
-    if Collection::find_by_uuid_and_org(coll_id, org_id, &mut conn).await.is_none() {
+    if Collection::find_by_uuid_and_org(coll_id, org_id, &mut conn)
+        .await
+        .is_none()
+    {
         err!("Collection not found in Organization")
     }
@@ -749,7 +744,14 @@
             continue;
         }

-        CollectionUser::save(&user.user_uuid, coll_id, d.ReadOnly, d.HidePasswords, &mut conn).await?;
+        CollectionUser::save(
+            &user.user_uuid,
+            coll_id,
+            d.ReadOnly,
+            d.HidePasswords,
+            &mut conn,
+        )
+        .await?;
     }

     Ok(())
@@ -776,8 +778,16 @@
     let mut ciphers_json = Vec::with_capacity(ciphers.len());
     for c in ciphers {
-        ciphers_json
-            .push(c.to_json(host, user_uuid, Some(&cipher_sync_data), CipherSyncType::Organization, conn).await);
+        ciphers_json.push(
+            c.to_json(
+                host,
+                user_uuid,
+                Some(&cipher_sync_data),
+                CipherSyncType::Organization,
+                conn,
+            )
+            .await,
+        );
     }
     json!(ciphers_json)
 }
@@ -906,7 +916,10 @@
             user
         }
         Some(user) => {
-            if UserOrganization::find_by_user_and_org(&user.uuid, org_id, &mut conn).await.is_some() {
+            if UserOrganization::find_by_user_and_org(&user.uuid, org_id, &mut conn)
+                .await
+                .is_some()
+            {
                 err!(format!("User already in organization: {email}"))
             } else {
                 // automatically accept existing users if mail is disabled
@@ -930,8 +943,14 @@
             match Collection::find_by_uuid_and_org(&col.Id, org_id, &mut conn).await {
                 None => err!("Collection not found in Organization"),
                 Some(collection) => {
-                    CollectionUser::save(&user.uuid, &collection.uuid, col.ReadOnly, col.HidePasswords, &mut conn)
-                        .await?;
+                    CollectionUser::save(
+                        &user.uuid,
+                        &collection.uuid,
+                        col.ReadOnly,
+                        col.HidePasswords,
+                        &mut conn,
+                    )
+                    .await?;
                 }
             }
         }
@@ -943,18 +962,6 @@
         let mut group_entry = GroupUser::new(String::from(group), user.uuid.clone());
         group_entry.save(&mut conn).await?;
     }
-
-    log_event(
-        EventType::OrganizationUserInvited as i32,
-        &new_user.uuid,
-        org_id,
-        headers.user.uuid.clone(),
-        headers.device.atype,
-        &headers.ip.ip,
-        &mut conn,
-    )
-    .await;
-
     if CONFIG.mail_enabled() {
         let org_name = match Organization::find_by_uuid(org_id, &mut conn).await {
             Some(org) => org.name,
@@ -987,10 +994,11 @@
     let mut bulk_response = Vec::new();

     for org_user_id in data.Ids {
-        let err_msg = match _reinvite_user(org_id, &org_user_id, &headers.user.email, &mut conn).await {
-            Ok(_) => String::new(),
-            Err(e) => format!("{e:?}"),
-        };
+        let err_msg =
+            match _reinvite_user(org_id, &org_user_id, &headers.user.email, &mut conn).await {
+                Ok(_) => String::new(),
+                Err(e) => format!("{e:?}"),
+            };

         bulk_response.push(json!(
             {
@@ -1009,11 +1017,21 @@
 }

 #[post("/organizations/<org_id>/users/<user_org>/reinvite")]
-async fn reinvite_user(org_id: &str, user_org: &str, headers: AdminHeaders, mut conn: DbConn) -> EmptyResult {
+async fn reinvite_user(
+    org_id: &str,
+    user_org: &str,
+    headers: AdminHeaders,
+    mut conn: DbConn,
+) -> EmptyResult {
     _reinvite_user(org_id, user_org, &headers.user.email, &mut conn).await
 }

-async fn _reinvite_user(org_id: &str, user_org: &str, invited_by_email: &str, conn: &mut DbConn) -> EmptyResult {
+async fn _reinvite_user(
+    org_id: &str,
+    user_org: &str,
+    invited_by_email: &str,
+    conn: &mut DbConn,
+) -> EmptyResult {
     if !CONFIG.invitations_allowed() {
         err!("Invitations are not allowed.")
     }
@@ -1082,16 +1100,18 @@
     Invitation::take(&claims.email, &mut conn).await;

     if let (Some(user_org), Some(org)) = (&claims.user_org_id, &claims.org_id) {
-        let mut user_org = match UserOrganization::find_by_uuid_and_org(user_org, org, &mut conn).await {
-            Some(user_org) => user_org,
-            None => err!("Error accepting the invitation"),
-        };
+        let mut user_org =
+            match UserOrganization::find_by_uuid_and_org(user_org, org, &mut conn).await {
+                Some(user_org) => user_org,
+                None => err!("Error accepting the invitation"),
+            };

         if user_org.status != UserOrgStatus::Invited as i32 {
             err!("User already accepted the invitation")
         }

-        let master_password_required = OrgPolicy::org_is_reset_password_auto_enroll(org, &mut conn).await;
+        let master_password_required =
+            OrgPolicy::org_is_reset_password_auto_enroll(org, &mut conn).await;
         if data.ResetPasswordKey.is_none() && master_password_required {
             err!("Reset password key is required, but not provided.");
         }
@@ -1099,7 +1119,9 @@
         // This check is also done at accept_invite(), _confirm_invite, _activate_user(), edit_user(), admin::update_user_org_type
         // It returns different error messages per function.
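         // Only members below Admin are subject to these org-policy checks at all;
         // the match arms below turn the policy errors into user-facing messages.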
         if user_org.atype < UserOrgType::Admin {
-            match OrgPolicy::is_user_allowed(&user_org.user_uuid, org_id, false, &mut conn).await {
+            match OrgPolicy::is_user_allowed(&user_org.user_uuid, org_id, false, &mut conn)
+                .await
+            {
                 Ok(_) => {}
                 Err(OrgPolicyErr::TwoFactorMissing) => {
                     err!("You cannot join this organization until you enable two-step login on your user account");
                 }
@@ -1158,10 +1180,13 @@
             for invite in keys {
                 let org_user_id = invite["Id"].as_str().unwrap_or_default();
                 let user_key = invite["Key"].as_str().unwrap_or_default();
-                let err_msg = match _confirm_invite(org_id, org_user_id, user_key, &headers, &mut conn, &nt).await {
-                    Ok(_) => String::new(),
-                    Err(e) => format!("{e:?}"),
-                };
+                let err_msg =
+                    match _confirm_invite(org_id, org_user_id, user_key, &headers, &mut conn, &nt)
+                        .await
+                    {
+                        Ok(_) => String::new(),
+                        Err(e) => format!("{e:?}"),
+                    };

                 bulk_response.push(json!(
                     {
@@ -1172,7 +1197,7 @@
                 ));
             }
         }
-        None => error!("No keys to confirm"),
+        None => panic!("No keys to confirm"),
     }

     Json(json!({
@@ -1208,10 +1233,11 @@
         err!("Key or UserId is not set, unable to process request");
     }

-    let mut user_to_confirm = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
-        Some(user) => user,
-        None => err!("The specified user isn't a member of the organization"),
-    };
+    let mut user_to_confirm =
+        match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
+            Some(user) => user,
+            None => err!("The specified user isn't a member of the organization"),
+        };

     if user_to_confirm.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
         err!("Only Owners can confirm Managers, Admins or Owners")
@@ -1237,18 +1263,6 @@
     user_to_confirm.status = UserOrgStatus::Confirmed as i32;
     user_to_confirm.akey = key.to_string();
-
-    log_event(
-        EventType::OrganizationUserConfirmed as i32,
-        &user_to_confirm.uuid,
-        org_id,
-        headers.user.uuid.clone(),
-        headers.device.atype,
-        &headers.ip.ip,
-        conn,
-    )
-    .await;
-
     if CONFIG.mail_enabled() {
         let org_name = match Organization::find_by_uuid(org_id, conn).await {
             Some(org) => org.name,
@@ -1287,7 +1301,12 @@
     // Else these will not be shown in the interface, and could lead to missing collections when saved.
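     // `include_groups` defaults to false here, and `include_collections` falls back
     // to the value of `include_groups` when the client omits it (see the unwrap_or below).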
     let include_groups = data.include_groups.unwrap_or(false);
     Ok(Json(
-        user.to_json_user_details(data.include_collections.unwrap_or(include_groups), include_groups, &mut conn).await,
+        user.to_json_user_details(
+            data.include_collections.unwrap_or(include_groups),
+            include_groups,
+            &mut conn,
+        )
+        .await,
     ))
 }
@@ -1300,7 +1319,11 @@ struct EditUserData {
     AccessAll: bool,
 }

-#[put("/organizations/<org_id>/users/<org_user_id>", data = "<data>", rank = 1)]
+#[put(
+    "/organizations/<org_id>/users/<org_user_id>",
+    data = "<data>",
+    rank = 1
+)]
 async fn put_organization_user(
     org_id: &str,
     org_user_id: &str,
@@ -1311,7 +1334,11 @@
     edit_user(org_id, org_user_id, data, headers, conn).await
 }

-#[post("/organizations/<org_id>/users/<org_user_id>", data = "<data>", rank = 1)]
+#[post(
+    "/organizations/<org_id>/users/<org_user_id>",
+    data = "<data>",
+    rank = 1
+)]
 async fn edit_user(
     org_id: &str,
     org_user_id: &str,
@@ -1326,10 +1353,11 @@
         None => err!("Invalid type"),
     };

-    let mut user_to_edit = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &mut conn).await {
-        Some(user) => user,
-        None => err!("The specified user isn't member of the organization"),
-    };
+    let mut user_to_edit =
+        match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &mut conn).await {
+            Some(user) => user,
+            None => err!("The specified user isn't member of the organization"),
+        };

     if new_type != user_to_edit.atype
         && (user_to_edit.atype >= UserOrgType::Admin || new_type >= UserOrgType::Admin)
@@ -1347,7 +1375,10 @@
         && user_to_edit.status == UserOrgStatus::Confirmed as i32
     {
         // Removing owner permission, check that there is at least one other confirmed owner
-        if UserOrganization::count_confirmed_by_org_and_type(org_id, UserOrgType::Owner, &mut conn).await <= 1 {
+        if UserOrganization::count_confirmed_by_org_and_type(org_id, UserOrgType::Owner, &mut conn)
+            .await
+            <= 1
+        {
             err!("Can't delete the last owner")
         }
     }
@@ -1370,7 +1401,13 @@
     user_to_edit.atype = new_type as i32;

     // Delete all the odd collections
-    for c in CollectionUser::find_by_organization_and_user_uuid(org_id, &user_to_edit.user_uuid, &mut conn).await {
+    for c in CollectionUser::find_by_organization_and_user_uuid(
+        org_id,
+        &user_to_edit.user_uuid,
+        &mut conn,
+    )
+    .await
+    {
         c.delete(&mut conn).await?;
     }
@@ -1399,18 +1436,6 @@
         let mut group_entry = GroupUser::new(String::from(group), user_to_edit.uuid.clone());
         group_entry.save(&mut conn).await?;
     }
-
-    log_event(
-        EventType::OrganizationUserUpdated as i32,
-        &user_to_edit.uuid,
-        org_id,
-        headers.user.uuid.clone(),
-        headers.device.atype,
-        &headers.ip.ip,
-        &mut conn,
-    )
-    .await;
-
     user_to_edit.save(&mut conn).await
 }
@@ -1476,33 +1501,26 @@
     conn: &mut DbConn,
     nt: &Notify<'_>,
 ) -> EmptyResult {
-    let user_to_delete = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
-        Some(user) => user,
-        None => err!("User to delete isn't member of the organization"),
-    };
+    let user_to_delete =
+        match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await {
+            Some(user) => user,
+            None => err!("User to delete isn't member of the organization"),
+        };

     if user_to_delete.atype != UserOrgType::User && headers.org_user_type != UserOrgType::Owner {
         err!("Only Owners can delete Admins or Owners")
     }

-    if user_to_delete.atype == UserOrgType::Owner && user_to_delete.status == UserOrgStatus::Confirmed as i32 {
+    if user_to_delete.atype == UserOrgType::Owner
+        && user_to_delete.status == UserOrgStatus::Confirmed as i32
+    {
         // Removing owner, check that there is at least one other confirmed owner
-        if UserOrganization::count_confirmed_by_org_and_type(org_id, UserOrgType::Owner, conn).await <= 1 {
+        if UserOrganization::count_confirmed_by_org_and_type(org_id, UserOrgType::Owner, conn).await
+            <= 1
+        {
             err!("Can't delete the last owner")
         }
     }
-
-    log_event(
-        EventType::OrganizationUserRemoved as i32,
-        &user_to_delete.uuid,
-        org_id,
-        headers.user.uuid.clone(),
-        headers.device.atype,
-        &headers.ip.ip,
-        conn,
-    )
-    .await;
-
     if let Some(user) = User::find_by_uuid(&user_to_delete.user_uuid, conn).await {
         nt.send_user_update(UpdateType::SyncOrgKeys, &user).await;
     }
@@ -1588,7 +1606,10 @@
     for coll in data.Collections {
         let collection = Collection::new(org_id.clone(), coll.Name, coll.ExternalId);
         if collection.save(&mut conn).await.is_err() {
-            collections.push(Err(Error::new("Failed to create Collection", "Failed to create Collection")));
+            collections.push(Err(Error::new(
+                "Failed to create Collection",
+                "Failed to create Collection",
+            )));
         } else {
             collections.push(Ok(collection));
         }
@@ -1605,7 +1626,17 @@
     let mut ciphers = Vec::new();
     for cipher_data in data.Ciphers {
         let mut cipher = Cipher::new(cipher_data.Type, cipher_data.Name.clone());
-        update_cipher_from_data(&mut cipher, cipher_data, &headers, false, &mut conn, &nt, UpdateType::None).await.ok();
+        update_cipher_from_data(
+            &mut cipher,
+            cipher_data,
+            &headers,
+            false,
+            &mut conn,
+            &nt,
+            UpdateType::None,
+        )
+        .await
+        .ok();
         ciphers.push(cipher);
     }
@@ -1662,7 +1693,12 @@ async fn list_policies_token(org_id: &str, token: &str, mut conn: DbConn) -> Jso
 }

 #[get("/organizations/<org_id>/policies/<pol_type>")]
-async fn get_policy(org_id: &str, pol_type: i32, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult {
+async fn get_policy(
+    org_id: &str,
+    pol_type: i32,
+    _headers: AdminHeaders,
+    mut conn: DbConn,
+) -> JsonResult {
     let pol_type_enum = match OrgPolicyType::from_i32(pol_type) {
         Some(pt) => pt,
         None => err!("Invalid or unsupported policy type"),
@@ -1689,7 +1725,7 @@
     org_id: &str,
     pol_type: i32,
     data: Json<PolicyData>,
-    headers: AdminHeaders,
+    _headers: AdminHeaders,
     mut conn: DbConn,
 ) -> JsonResult {
     let data: PolicyData = data.into_inner();
@@ -1701,8 +1737,13 @@
     // When enabling the TwoFactorAuthentication policy, remove this org's members that do not have 2FA
     if pol_type_enum == OrgPolicyType::TwoFactorAuthentication && data.enabled {
-        for member in UserOrganization::find_by_org(org_id, &mut conn).await.into_iter() {
-            let user_twofactor_disabled = TwoFactor::find_by_user(&member.user_uuid, &mut conn).await.is_empty();
+        for member in UserOrganization::find_by_org(org_id, &mut conn)
+            .await
+            .into_iter()
+        {
+            let user_twofactor_disabled = TwoFactor::find_by_user(&member.user_uuid, &mut conn)
+                .await
+                .is_empty();

             // Policy only applies to non-Owner/non-Admin members who have accepted joining the org
             // Invited users still need to accept the invite and will get an error when they try to accept the invite.
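The membership check applied inside this loop reduces to a small predicate. A self-contained sketch of that predicate (the constants and the Member struct below are stand-ins for this crate's UserOrgType/UserOrgStatus enums and its UserOrganization model, not the committed code):

// Stand-ins for UserOrgType::Admin and UserOrgStatus::Invited.
const ADMIN: i32 = 2;
const INVITED: i32 = 0;

struct Member {
    atype: i32,
    status: i32,
    has_2fa: bool,
}

// Mirrors the condition in put_policy: when the TwoFactorAuthentication policy
// is enabled, only accepted, non-Owner/non-Admin members without a two-factor
// method are mailed (if mail is enabled) and removed from the org.
fn must_remove(member: &Member) -> bool {
    !member.has_2fa && member.atype < ADMIN && member.status != INVITED
}

fn main() {
    let accepted_without_2fa = Member { atype: 1, status: 2, has_2fa: false };
    let invited_without_2fa = Member { atype: 1, status: INVITED, has_2fa: false };
    assert!(must_remove(&accepted_without_2fa));
    assert!(!must_remove(&invited_without_2fa));
}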
@@ -1711,23 +1752,15 @@ async fn put_policy( && member.status != UserOrgStatus::Invited as i32 { if CONFIG.mail_enabled() { - let org = Organization::find_by_uuid(&member.org_uuid, &mut conn).await.unwrap(); - let user = User::find_by_uuid(&member.user_uuid, &mut conn).await.unwrap(); + let org = Organization::find_by_uuid(&member.org_uuid, &mut conn) + .await + .unwrap(); + let user = User::find_by_uuid(&member.user_uuid, &mut conn) + .await + .unwrap(); mail::send_2fa_removed_from_org(&user.email, &org.name).await?; } - - log_event( - EventType::OrganizationUserRemoved as i32, - &member.uuid, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - member.delete(&mut conn).await?; } } @@ -1735,33 +1768,33 @@ async fn put_policy( // When enabling the SingleOrg policy, remove this org's members that are members of other orgs if pol_type_enum == OrgPolicyType::SingleOrg && data.enabled { - for member in UserOrganization::find_by_org(org_id, &mut conn).await.into_iter() { + for member in UserOrganization::find_by_org(org_id, &mut conn) + .await + .into_iter() + { // Policy only applies to non-Owner/non-Admin members who have accepted joining the org // Exclude invited and revoked users when checking for this policy. // Those users will not be allowed to accept or be activated because of the policy checks done there. // We check if the count is larger then 1, because it includes this organization also. if member.atype < UserOrgType::Admin && member.status != UserOrgStatus::Invited as i32 - && UserOrganization::count_accepted_and_confirmed_by_user(&member.user_uuid, &mut conn).await > 1 + && UserOrganization::count_accepted_and_confirmed_by_user( + &member.user_uuid, + &mut conn, + ) + .await + > 1 { if CONFIG.mail_enabled() { - let org = Organization::find_by_uuid(&member.org_uuid, &mut conn).await.unwrap(); - let user = User::find_by_uuid(&member.user_uuid, &mut conn).await.unwrap(); + let org = Organization::find_by_uuid(&member.org_uuid, &mut conn) + .await + .unwrap(); + let user = User::find_by_uuid(&member.user_uuid, &mut conn) + .await + .unwrap(); mail::send_single_org_removed_from_org(&user.email, &org.name).await?; } - - log_event( - EventType::OrganizationUserRemoved as i32, - &member.uuid, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - member.delete(&mut conn).await?; } } @@ -1775,18 +1808,6 @@ async fn put_policy( policy.enabled = data.enabled; policy.data = serde_json::to_string(&data.data)?; policy.save(&mut conn).await?; - - log_event( - EventType::PolicyUpdated as i32, - &policy.uuid, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - Ok(Json(policy.to_json())) } @@ -1874,7 +1895,12 @@ struct OrgImportData { } #[post("/organizations/<org_id>/import", data = "<data>")] -async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers, mut conn: DbConn) -> EmptyResult { +async fn import( + org_id: &str, + data: JsonUpcase<OrgImportData>, + headers: Headers, + mut conn: DbConn, +) -> EmptyResult { let data = data.into_inner().data; // TODO: Currently we aren't storing the externalId's anywhere, so we also don't have a way @@ -1892,23 +1918,17 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers, for user_data in &data.Users { if user_data.Deleted { // If user is marked for deletion and it exists, delete it - if let Some(user_org) = 
UserOrganization::find_by_email_and_org(&user_data.Email, org_id, &mut conn).await { - log_event( - EventType::OrganizationUserRemoved as i32, - &user_org.uuid, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - + if let Some(user_org) = + UserOrganization::find_by_email_and_org(&user_data.Email, org_id, &mut conn).await + { user_org.delete(&mut conn).await?; } // If user is not part of the organization, but it exists - } else if UserOrganization::find_by_email_and_org(&user_data.Email, org_id, &mut conn).await.is_none() { + } else if UserOrganization::find_by_email_and_org(&user_data.Email, org_id, &mut conn) + .await + .is_none() + { if let Some(user) = User::find_by_mail(&user_data.Email, &mut conn).await { let user_org_status = if CONFIG.mail_enabled() { UserOrgStatus::Invited as i32 @@ -1916,24 +1936,12 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers, UserOrgStatus::Accepted as i32 // Automatically mark user as accepted if no email invites }; - let mut new_org_user = UserOrganization::new(user.uuid.clone(), String::from(org_id)); + let mut new_org_user = + UserOrganization::new(user.uuid.clone(), String::from(org_id)); new_org_user.access_all = false; new_org_user.atype = UserOrgType::User as i32; new_org_user.status = user_org_status; - new_org_user.save(&mut conn).await?; - - log_event( - EventType::OrganizationUserInvited as i32, - &new_org_user.uuid, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - if CONFIG.mail_enabled() { let org_name = match Organization::find_by_uuid(org_id, &mut conn).await { Some(org) => org.name, @@ -1956,20 +1964,14 @@ async fn import(org_id: &str, data: JsonUpcase<OrgImportData>, headers: Headers, // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true) if data.OverwriteExisting { - for user_org in UserOrganization::find_by_org_and_type(org_id, UserOrgType::User, &mut conn).await { - if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &mut conn).await.map(|u| u.email) { + for user_org in + UserOrganization::find_by_org_and_type(org_id, UserOrgType::User, &mut conn).await + { + if let Some(user_email) = User::find_by_uuid(&user_org.user_uuid, &mut conn) + .await + .map(|u| u.email) + { if !data.Users.iter().any(|u| u.Email == user_email) { - log_event( - EventType::OrganizationUserRemoved as i32, - &user_org.uuid, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - user_org.delete(&mut conn).await?; } } @@ -2025,10 +2027,12 @@ async fn bulk_revoke_organization_user( Some(org_users) => { for org_user_id in org_users { let org_user_id = org_user_id.as_str().unwrap_or_default(); - let err_msg = match _revoke_organization_user(org_id, org_user_id, &headers, &mut conn).await { - Ok(_) => String::new(), - Err(e) => format!("{e:?}"), - }; + let err_msg = + match _revoke_organization_user(org_id, org_user_id, &headers, &mut conn).await + { + Ok(_) => String::new(), + Err(e) => format!("{e:?}"), + }; bulk_response.push(json!( { @@ -2039,7 +2043,7 @@ async fn bulk_revoke_organization_user( )); } } - None => error!("No users to revoke"), + None => panic!("No users to revoke"), } Json(json!({ @@ -2064,24 +2068,18 @@ async fn _revoke_organization_user( err!("Only owners can revoke other owners") } if user_org.atype == UserOrgType::Owner - && 
UserOrganization::count_confirmed_by_org_and_type(org_id, UserOrgType::Owner, conn).await <= 1 + && UserOrganization::count_confirmed_by_org_and_type( + org_id, + UserOrgType::Owner, + conn, + ) + .await + <= 1 { err!("Organization must have at least one confirmed owner") } - user_org.revoke(); user_org.save(conn).await?; - - log_event( - EventType::OrganizationUserRevoked as i32, - &user_org.uuid, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - conn, - ) - .await; } Some(_) => err!("User is already revoked"), None => err!("User not found in organization"), @@ -2135,7 +2133,14 @@ async fn bulk_restore_organization_user( Some(org_users) => { for org_user_id in org_users { let org_user_id = org_user_id.as_str().unwrap_or_default(); - let err_msg = match _restore_organization_user(org_id, org_user_id, &headers, &mut conn).await { + let err_msg = match _restore_organization_user( + org_id, + org_user_id, + &headers, + &mut conn, + ) + .await + { Ok(_) => String::new(), Err(e) => format!("{e:?}"), }; @@ -2149,7 +2154,7 @@ async fn bulk_restore_organization_user( )); } } - None => error!("No users to restore"), + None => panic!("No users to restore"), } Json(json!({ @@ -2190,17 +2195,6 @@ async fn _restore_organization_user( user_org.restore(); user_org.save(conn).await?; - - log_event( - EventType::OrganizationUserRestored as i32, - &user_org.uuid, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - conn, - ) - .await; } Some(_) => err!("User is already active"), None => err!("User not found in organization"), @@ -2208,22 +2202,10 @@ async fn _restore_organization_user( Ok(()) } +#[allow(unused_variables)] #[get("/organizations/<org_id>/groups")] -async fn get_groups(org_id: &str, _headers: ManagerHeadersLoose, mut conn: DbConn) -> JsonResult { - let groups: Vec<Value> = if CONFIG.org_groups_enabled() { - // Group::find_by_organization(&org_id, &mut conn).await.iter().map(Group::to_json).collect::<Value>() - let groups = Group::find_by_organization(org_id, &mut conn).await; - let mut groups_json = Vec::with_capacity(groups.len()); - for g in groups { - groups_json.push(g.to_json_details(&mut conn).await) - } - groups_json - } else { - // The Bitwarden clients seem to call this API regardless of whether groups are enabled, - // so just act as if there are no groups. 
- Vec::with_capacity(0) - }; - +fn get_groups(org_id: &str, _headers: ManagerHeadersLoose, _conn: DbConn) -> JsonResult { + let groups: Vec<Value> = Vec::new(); Ok(Json(json!({ "Data": groups, "Object": "list", @@ -2233,34 +2215,7 @@ async fn get_groups(org_id: &str, _headers: ManagerHeadersLoose, mut conn: DbCon #[derive(Deserialize)] #[allow(non_snake_case)] -struct GroupRequest { - Name: String, - AccessAll: Option<bool>, - ExternalId: Option<String>, - Collections: Vec<SelectionReadOnly>, - Users: Vec<String>, -} - -impl GroupRequest { - pub fn to_group(&self, organizations_uuid: &str) -> Group { - Group::new( - String::from(organizations_uuid), - self.Name.clone(), - self.AccessAll.unwrap_or(false), - self.ExternalId.clone(), - ) - } - - pub fn update_group(&self, mut group: Group) -> Group { - group.name = self.Name.clone(); - group.access_all = self.AccessAll.unwrap_or(false); - // Group Updates do not support changing the external_id - // These input fields are in a disabled state, and can only be updated/added via ldap_import - - group - } -} - +struct GroupRequest; #[derive(Deserialize, Serialize)] #[allow(non_snake_case)] struct SelectionReadOnly { @@ -2270,19 +2225,9 @@ struct SelectionReadOnly { } impl SelectionReadOnly { - pub fn to_collection_group(&self, groups_uuid: String) -> CollectionGroup { - CollectionGroup::new(self.Id.clone(), groups_uuid, self.ReadOnly, self.HidePasswords) - } - - pub fn to_collection_group_details_read_only(collection_group: &CollectionGroup) -> SelectionReadOnly { - SelectionReadOnly { - Id: collection_group.groups_uuid.clone(), - ReadOnly: collection_group.read_only, - HidePasswords: collection_group.hide_passwords, - } - } - - pub fn to_collection_user_details_read_only(collection_user: &CollectionUser) -> SelectionReadOnly { + pub fn to_collection_user_details_read_only( + collection_user: &CollectionUser, + ) -> SelectionReadOnly { SelectionReadOnly { Id: collection_user.user_uuid.clone(), ReadOnly: collection_user.read_only, @@ -2296,391 +2241,171 @@ impl SelectionReadOnly { } #[post("/organizations/<org_id>/groups/<group_id>", data = "<data>")] -async fn post_group( +fn post_group( org_id: &str, group_id: &str, data: JsonUpcase<GroupRequest>, headers: AdminHeaders, conn: DbConn, ) -> JsonResult { - put_group(org_id, group_id, data, headers, conn).await + put_group(org_id, group_id, data, headers, conn) } +#[allow(unused_variables)] #[post("/organizations/<org_id>/groups", data = "<data>")] -async fn post_groups( +fn post_groups( org_id: &str, - headers: AdminHeaders, + _headers: AdminHeaders, data: JsonUpcase<GroupRequest>, - mut conn: DbConn, + _conn: DbConn, ) -> JsonResult { - if !CONFIG.org_groups_enabled() { - err!("Group support is disabled"); - } - - let group_request = data.into_inner().data; - let group = group_request.to_group(org_id); - - log_event( - EventType::GroupCreated as i32, - &group.uuid, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - - add_update_group(group, group_request.Collections, group_request.Users, org_id, &headers, &mut conn).await + err!("Group support is disabled") } +#[allow(unused_variables)] #[put("/organizations/<org_id>/groups/<group_id>", data = "<data>")] -async fn put_group( +fn put_group( org_id: &str, group_id: &str, data: JsonUpcase<GroupRequest>, - headers: AdminHeaders, - mut conn: DbConn, -) -> JsonResult { - if !CONFIG.org_groups_enabled() { - err!("Group support is disabled"); - } - - let group = match Group::find_by_uuid(group_id, 
&mut conn).await { - Some(group) => group, - None => err!("Group not found"), - }; - - let group_request = data.into_inner().data; - let updated_group = group_request.update_group(group); - - CollectionGroup::delete_all_by_group(group_id, &mut conn).await?; - GroupUser::delete_all_by_group(group_id, &mut conn).await?; - - log_event( - EventType::GroupUpdated as i32, - &updated_group.uuid, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - - add_update_group(updated_group, group_request.Collections, group_request.Users, org_id, &headers, &mut conn).await -} - -async fn add_update_group( - mut group: Group, - collections: Vec<SelectionReadOnly>, - users: Vec<String>, - org_id: &str, - headers: &AdminHeaders, - conn: &mut DbConn, + _headers: AdminHeaders, + _conn: DbConn, ) -> JsonResult { - group.save(conn).await?; - - for selection_read_only_request in collections { - let mut collection_group = selection_read_only_request.to_collection_group(group.uuid.clone()); - collection_group.save(conn).await?; - } - - for assigned_user_id in users { - let mut user_entry = GroupUser::new(group.uuid.clone(), assigned_user_id.clone()); - user_entry.save(conn).await?; - - log_event( - EventType::OrganizationUserUpdatedGroups as i32, - &assigned_user_id, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - conn, - ) - .await; - } - - Ok(Json(json!({ - "Id": group.uuid, - "OrganizationId": group.organizations_uuid, - "Name": group.name, - "AccessAll": group.access_all, - "ExternalId": group.external_id - }))) + err!("Group support is disabled") } +#[allow(unused_variables)] #[get("/organizations/<_org_id>/groups/<group_id>/details")] -async fn get_group_details(_org_id: &str, group_id: &str, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult { - if !CONFIG.org_groups_enabled() { - err!("Group support is disabled"); - } - - let group = match Group::find_by_uuid(group_id, &mut conn).await { - Some(group) => group, - _ => err!("Group could not be found!"), - }; - - Ok(Json(group.to_json_details(&mut conn).await)) +fn get_group_details( + _org_id: &str, + group_id: &str, + _headers: AdminHeaders, + _conn: DbConn, +) -> JsonResult { + err!("Group support is disabled"); } #[post("/organizations/<org_id>/groups/<group_id>/delete")] -async fn post_delete_group(org_id: &str, group_id: &str, headers: AdminHeaders, mut conn: DbConn) -> EmptyResult { - _delete_group(org_id, group_id, &headers, &mut conn).await +fn post_delete_group( + org_id: &str, + group_id: &str, + headers: AdminHeaders, + mut conn: DbConn, +) -> EmptyResult { + _delete_group(org_id, group_id, &headers, &mut conn) } #[delete("/organizations/<org_id>/groups/<group_id>")] -async fn delete_group(org_id: &str, group_id: &str, headers: AdminHeaders, mut conn: DbConn) -> EmptyResult { - _delete_group(org_id, group_id, &headers, &mut conn).await +fn delete_group( + org_id: &str, + group_id: &str, + headers: AdminHeaders, + mut conn: DbConn, +) -> EmptyResult { + _delete_group(org_id, group_id, &headers, &mut conn) } - -async fn _delete_group(org_id: &str, group_id: &str, headers: &AdminHeaders, conn: &mut DbConn) -> EmptyResult { - if !CONFIG.org_groups_enabled() { - err!("Group support is disabled"); - } - - let group = match Group::find_by_uuid(group_id, conn).await { - Some(group) => group, - _ => err!("Group not found"), - }; - - log_event( - EventType::GroupDeleted as i32, - &group.uuid, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - 
&headers.ip.ip, - conn, - ) - .await; - - group.delete(conn).await +fn _delete_group(_: &str, _: &str, _: &AdminHeaders, _: &mut DbConn) -> EmptyResult { + err!("Group support is disabled"); } +#[allow(unused_variables)] #[delete("/organizations/<org_id>/groups", data = "<data>")] -async fn bulk_delete_groups( +fn bulk_delete_groups( org_id: &str, data: JsonUpcase<OrgBulkIds>, - headers: AdminHeaders, - mut conn: DbConn, + _headers: AdminHeaders, + _conn: DbConn, ) -> EmptyResult { - if !CONFIG.org_groups_enabled() { - err!("Group support is disabled"); - } - - let data: OrgBulkIds = data.into_inner().data; - - for group_id in data.Ids { - _delete_group(org_id, &group_id, &headers, &mut conn).await? - } - Ok(()) + err!("Group support is disabled"); } +#[allow(unused_variables)] #[get("/organizations/<_org_id>/groups/<group_id>")] -async fn get_group(_org_id: &str, group_id: &str, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult { - if !CONFIG.org_groups_enabled() { - err!("Group support is disabled"); - } - - let group = match Group::find_by_uuid(group_id, &mut conn).await { - Some(group) => group, - _ => err!("Group not found"), - }; - - Ok(Json(group.to_json())) +fn get_group(_org_id: &str, group_id: &str, _headers: AdminHeaders, _conn: DbConn) -> JsonResult { + err!("Group support is disabled"); } +#[allow(unused_variables)] #[get("/organizations/<_org_id>/groups/<group_id>/users")] -async fn get_group_users(_org_id: &str, group_id: &str, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult { - if !CONFIG.org_groups_enabled() { - err!("Group support is disabled"); - } - - match Group::find_by_uuid(group_id, &mut conn).await { - Some(_) => { /* Do nothing */ } - _ => err!("Group could not be found!"), - }; - - let group_users: Vec<String> = GroupUser::find_by_group(group_id, &mut conn) - .await - .iter() - .map(|entry| entry.users_organizations_uuid.clone()) - .collect(); - - Ok(Json(json!(group_users))) +fn get_group_users( + _org_id: &str, + group_id: &str, + _headers: AdminHeaders, + _conn: DbConn, +) -> JsonResult { + err!("Group support is disabled"); } - +#[allow(unused_variables)] #[put("/organizations/<org_id>/groups/<group_id>/users", data = "<data>")] -async fn put_group_users( +fn put_group_users( org_id: &str, group_id: &str, - headers: AdminHeaders, + _headers: AdminHeaders, data: JsonVec<String>, - mut conn: DbConn, + _conn: DbConn, ) -> EmptyResult { - if !CONFIG.org_groups_enabled() { - err!("Group support is disabled"); - } - - match Group::find_by_uuid(group_id, &mut conn).await { - Some(_) => { /* Do nothing */ } - _ => err!("Group could not be found!"), - }; - - GroupUser::delete_all_by_group(group_id, &mut conn).await?; - - let assigned_user_ids = data.into_inner(); - for assigned_user_id in assigned_user_ids { - let mut user_entry = GroupUser::new(String::from(group_id), assigned_user_id.clone()); - user_entry.save(&mut conn).await?; - - log_event( - EventType::OrganizationUserUpdatedGroups as i32, - &assigned_user_id, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - } - - Ok(()) + err!("Group support is disabled"); } +#[allow(unused_variables)] #[get("/organizations/<_org_id>/users/<user_id>/groups")] -async fn get_user_groups(_org_id: &str, user_id: &str, _headers: AdminHeaders, mut conn: DbConn) -> JsonResult { - if !CONFIG.org_groups_enabled() { - err!("Group support is disabled"); - } - - match UserOrganization::find_by_uuid(user_id, &mut conn).await { - Some(_) => { /* Do nothing */ } - _ 
=> err!("User could not be found!"), - }; - - let user_groups: Vec<String> = - GroupUser::find_by_user(user_id, &mut conn).await.iter().map(|entry| entry.groups_uuid.clone()).collect(); - - Ok(Json(json!(user_groups))) +fn get_user_groups( + _org_id: &str, + user_id: &str, + _headers: AdminHeaders, + _conn: DbConn, +) -> JsonResult { + err!("Group support is disabled") } #[derive(Deserialize)] #[allow(non_snake_case)] -struct OrganizationUserUpdateGroupsRequest { - GroupIds: Vec<String>, -} +struct OrganizationUserUpdateGroupsRequest; #[post("/organizations/<org_id>/users/<org_user_id>/groups", data = "<data>")] -async fn post_user_groups( +fn post_user_groups( org_id: &str, org_user_id: &str, data: JsonUpcase<OrganizationUserUpdateGroupsRequest>, headers: AdminHeaders, conn: DbConn, ) -> EmptyResult { - put_user_groups(org_id, org_user_id, data, headers, conn).await + put_user_groups(org_id, org_user_id, data, headers, conn) } +#[allow(unused_variables)] #[put("/organizations/<org_id>/users/<org_user_id>/groups", data = "<data>")] -async fn put_user_groups( +fn put_user_groups( org_id: &str, org_user_id: &str, data: JsonUpcase<OrganizationUserUpdateGroupsRequest>, - headers: AdminHeaders, - mut conn: DbConn, + _headers: AdminHeaders, + _conn: DbConn, ) -> EmptyResult { - if !CONFIG.org_groups_enabled() { - err!("Group support is disabled"); - } - - let user_org = match UserOrganization::find_by_uuid(org_user_id, &mut conn).await { - Some(uo) => uo, - _ => err!("User could not be found!"), - }; - - if user_org.org_uuid != org_id { - err!("Group doesn't belong to organization"); - } - - GroupUser::delete_all_by_user(org_user_id, &mut conn).await?; - - let assigned_group_ids = data.into_inner().data; - for assigned_group_id in assigned_group_ids.GroupIds { - let mut group_user = GroupUser::new(assigned_group_id.clone(), String::from(org_user_id)); - group_user.save(&mut conn).await?; - } - - log_event( - EventType::OrganizationUserUpdatedGroups as i32, - org_user_id, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - - Ok(()) + err!("Group support is disabled") } #[post("/organizations/<org_id>/groups/<group_id>/delete-user/<org_user_id>")] -async fn post_delete_group_user( +fn post_delete_group_user( org_id: &str, group_id: &str, org_user_id: &str, headers: AdminHeaders, conn: DbConn, ) -> EmptyResult { - delete_group_user(org_id, group_id, org_user_id, headers, conn).await + delete_group_user(org_id, group_id, org_user_id, headers, conn) } +#[allow(unused_variables)] #[delete("/organizations/<org_id>/groups/<group_id>/users/<org_user_id>")] -async fn delete_group_user( +fn delete_group_user( org_id: &str, group_id: &str, org_user_id: &str, - headers: AdminHeaders, - mut conn: DbConn, + _headers: AdminHeaders, + _conn: DbConn, ) -> EmptyResult { - if !CONFIG.org_groups_enabled() { - err!("Group support is disabled"); - } - - let user_org = match UserOrganization::find_by_uuid(org_user_id, &mut conn).await { - Some(uo) => uo, - _ => err!("User could not be found!"), - }; - - if user_org.org_uuid != org_id { - err!("User doesn't belong to organization"); - } - - let group = match Group::find_by_uuid(group_id, &mut conn).await { - Some(g) => g, - _ => err!("Group could not be found!"), - }; - - if group.organizations_uuid != org_id { - err!("Group doesn't belong to organization"); - } - - log_event( - EventType::OrganizationUserUpdatedGroups as i32, - org_user_id, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - 
&headers.ip.ip, - &mut conn, - ) - .await; - - GroupUser::delete_by_group_id_and_user_id(group_id, org_user_id, &mut conn).await + err!("Group support is disabled") } #[derive(Deserialize)] @@ -2711,7 +2436,10 @@ async fn get_organization_keys(org_id: &str, mut conn: DbConn) -> JsonResult { }))) } -#[put("/organizations/<org_id>/users/<org_user_id>/reset-password", data = "<data>")] +#[put( + "/organizations/<org_id>/users/<org_user_id>/reset-password", + data = "<data>" +)] async fn put_reset_password( org_id: &str, org_user_id: &str, @@ -2725,17 +2453,19 @@ async fn put_reset_password( None => err!("Required organization not found"), }; - let org_user = match UserOrganization::find_by_uuid_and_org(org_user_id, &org.uuid, &mut conn).await { - Some(user) => user, - None => err!("User to reset isn't member of required organization"), - }; + let org_user = + match UserOrganization::find_by_uuid_and_org(org_user_id, &org.uuid, &mut conn).await { + Some(user) => user, + None => err!("User to reset isn't member of required organization"), + }; let user = match User::find_by_uuid(&org_user.user_uuid, &mut conn).await { Some(user) => user, None => err!("User not found"), }; - check_reset_password_applicable_and_permissions(org_id, org_user_id, &headers, &mut conn).await?; + check_reset_password_applicable_and_permissions(org_id, org_user_id, &headers, &mut conn) + .await?; if org_user.reset_password_key.is_none() { err!("Password reset not or not correctly enrolled"); @@ -2753,22 +2483,14 @@ async fn put_reset_password( let reset_request = data.into_inner().data; let mut user = user; - user.set_password(reset_request.NewMasterPasswordHash.as_str(), Some(reset_request.Key), true, None); + user.set_password( + reset_request.NewMasterPasswordHash.as_str(), + Some(reset_request.Key), + true, + None, + ); user.save(&mut conn).await?; - nt.send_logout(&user, None).await; - - log_event( - EventType::OrganizationUserAdminResetPassword as i32, - org_user_id, - org_id, - headers.user.uuid.clone(), - headers.device.atype, - &headers.ip.ip, - &mut conn, - ) - .await; - Ok(()) } @@ -2784,17 +2506,19 @@ async fn get_reset_password_details( None => err!("Required organization not found"), }; - let org_user = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &mut conn).await { - Some(user) => user, - None => err!("User to reset isn't member of required organization"), - }; + let org_user = + match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, &mut conn).await { + Some(user) => user, + None => err!("User to reset isn't member of required organization"), + }; let user = match User::find_by_uuid(&org_user.user_uuid, &mut conn).await { Some(user) => user, None => err!("User not found"), }; - check_reset_password_applicable_and_permissions(org_id, org_user_id, &headers, &mut conn).await?; + check_reset_password_applicable_and_permissions(org_id, org_user_id, &headers, &mut conn) + .await?; // https://github.com/bitwarden/server/blob/3b50ccb9f804efaacdc46bed5b60e5b28eddefcf/src/Api/Models/Response/Organizations/OrganizationUserResponseModel.cs#L111 Ok(Json(json!({ @@ -2817,7 +2541,8 @@ async fn check_reset_password_applicable_and_permissions( ) -> EmptyResult { check_reset_password_applicable(org_id, conn).await?; - let target_user = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await { + let target_user = match UserOrganization::find_by_uuid_and_org(org_user_id, org_id, conn).await + { Some(user) => user, None => err!("Reset target user not found"), }; @@ 
-2835,10 +2560,11 @@ async fn check_reset_password_applicable(org_id: &str, conn: &mut DbConn) -> Emp err!("Password reset is not supported on an email-disabled instance."); } - let policy = match OrgPolicy::find_by_org_and_type(org_id, OrgPolicyType::ResetPassword, conn).await { - Some(p) => p, - None => err!("Policy not found"), - }; + let policy = + match OrgPolicy::find_by_org_and_type(org_id, OrgPolicyType::ResetPassword, conn).await { + Some(p) => p, + None => err!("Policy not found"), + }; if !policy.enabled { err!("Reset password policy not enabled"); @@ -2846,8 +2572,11 @@ async fn check_reset_password_applicable(org_id: &str, conn: &mut DbConn) -> Emp Ok(()) } - -#[put("/organizations/<org_id>/users/<org_user_id>/reset-password-enrollment", data = "<data>")] +#[allow(unused_variables)] +#[put( + "/organizations/<org_id>/users/<org_user_id>/reset-password-enrollment", + data = "<data>" +)] async fn put_reset_password_enrollment( org_id: &str, org_user_id: &str, @@ -2855,16 +2584,18 @@ async fn put_reset_password_enrollment( data: JsonUpcase<OrganizationUserResetPasswordEnrollmentRequest>, mut conn: DbConn, ) -> EmptyResult { - let mut org_user = match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await { - Some(u) => u, - None => err!("User to enroll isn't member of required organization"), - }; + let mut org_user = + match UserOrganization::find_by_user_and_org(&headers.user.uuid, org_id, &mut conn).await { + Some(u) => u, + None => err!("User to enroll isn't member of required organization"), + }; check_reset_password_applicable(org_id, &mut conn).await?; let reset_request = data.into_inner().data; - if reset_request.ResetPasswordKey.is_none() && OrgPolicy::org_is_reset_password_auto_enroll(org_id, &mut conn).await + if reset_request.ResetPasswordKey.is_none() + && OrgPolicy::org_is_reset_password_auto_enroll(org_id, &mut conn).await { err!("Reset password can't be withdrawed due to an enterprise policy"); } @@ -2882,16 +2613,6 @@ async fn put_reset_password_enrollment( org_user.reset_password_key = reset_request.ResetPasswordKey; org_user.save(&mut conn).await?; - - let log_id = if org_user.reset_password_key.is_some() { - EventType::OrganizationUserResetPasswordEnroll as i32 - } else { - EventType::OrganizationUserResetPasswordWithdraw as i32 - }; - - log_event(log_id, org_user_id, org_id, headers.user.uuid.clone(), headers.device.atype, &headers.ip.ip, &mut conn) - .await; - Ok(()) } @@ -2963,14 +2684,20 @@ async fn _api_key( if rotate { org_api_key.api_key = crate::crypto::generate_api_key(); org_api_key.revision_date = chrono::Utc::now().naive_utc(); - org_api_key.save(&conn).await.expect("Error rotating organization API Key"); + org_api_key + .save(&conn) + .await + .expect("Error rotating organization API Key"); } org_api_key } None => { let api_key = crate::crypto::generate_api_key(); let new_org_api_key = OrganizationApiKey::new(String::from(org_id), api_key); - new_org_api_key.save(&conn).await.expect("Error creating organization API Key"); + new_org_api_key + .save(&conn) + .await + .expect("Error creating organization API Key"); new_org_api_key } }; @@ -2983,7 +2710,12 @@ async fn _api_key( } #[post("/organizations/<org_id>/api-key", data = "<data>")] -async fn api_key(org_id: &str, data: JsonUpcase<PasswordData>, headers: AdminHeaders, conn: DbConn) -> JsonResult { +async fn api_key( + org_id: &str, + data: JsonUpcase<PasswordData>, + headers: AdminHeaders, + conn: DbConn, +) -> JsonResult { _api_key(org_id, data, false, headers, 
conn).await } diff --git a/src/api/core/public.rs b/src/api/core/public.rs @@ -19,11 +19,7 @@ pub fn routes() -> Vec<Route> { #[derive(Deserialize)] #[allow(non_snake_case)] -struct OrgImportGroupData { - Name: String, - ExternalId: String, - MemberExternalIds: Vec<String>, -} +struct OrgImportGroupData; #[derive(Deserialize)] #[allow(non_snake_case)] @@ -36,14 +32,17 @@ struct OrgImportUserData { #[derive(Deserialize)] #[allow(non_snake_case)] struct OrgImportData { - Groups: Vec<OrgImportGroupData>, Members: Vec<OrgImportUserData>, OverwriteExisting: bool, // LargeImport: bool, // For now this will not be used, upstream uses this to prevent syncs of more then 2000 users or groups without the flag set. } #[post("/public/organization/import", data = "<data>")] -async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut conn: DbConn) -> EmptyResult { +async fn ldap_import( + data: JsonUpcase<OrgImportData>, + token: PublicToken, + mut conn: DbConn, +) -> EmptyResult { // Most of the logic for this function can be found here // https://github.com/bitwarden/server/blob/fd892b2ff4547648a276734fb2b14a8abae2c6f5/src/Core/Services/Implementations/OrganizationService.cs#L1797 @@ -60,7 +59,12 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co let revoked = if user_org.atype == UserOrgType::Owner && user_org.status == UserOrgStatus::Confirmed as i32 { - if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn).await + if UserOrganization::count_confirmed_by_org_and_type( + &org_id, + UserOrgType::Owner, + &mut conn, + ) + .await <= 1 { warn!("Can't revoke the last owner"); @@ -117,10 +121,11 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co new_org_user.save(&mut conn).await?; if CONFIG.mail_enabled() { - let (org_name, org_email) = match Organization::find_by_uuid(&org_id, &mut conn).await { - Some(org) => (org.name, org.billing_email), - None => err!("Error looking up organization"), - }; + let (org_name, org_email) = + match Organization::find_by_uuid(&org_id, &mut conn).await { + Some(org) => (org.name, org.billing_email), + None => err!("Error looking up organization"), + }; mail::send_invite( &user_data.Email, @@ -134,44 +139,25 @@ async fn ldap_import(data: JsonUpcase<OrgImportData>, token: PublicToken, mut co } } } - - if CONFIG.org_groups_enabled() { - for group_data in &data.Groups { - let group_uuid = match Group::find_by_external_id(&group_data.ExternalId, &mut conn).await { - Some(group) => group.uuid, - None => { - let mut group = - Group::new(org_id.clone(), group_data.Name.clone(), false, Some(group_data.ExternalId.clone())); - group.save(&mut conn).await?; - group.uuid - } - }; - - GroupUser::delete_all_by_group(&group_uuid, &mut conn).await?; - - for ext_id in &group_data.MemberExternalIds { - if let Some(user_org) = UserOrganization::find_by_external_id_and_org(ext_id, &org_id, &mut conn).await - { - let mut group_user = GroupUser::new(group_uuid.clone(), user_org.uuid.clone()); - group_user.save(&mut conn).await?; - } - } - } - } else { - warn!("Group support is disabled, groups will not be imported!"); - } - + warn!("Group support is disabled, groups will not be imported!"); // If this flag is enabled, any user that isn't provided in the Users list will be removed (by default they will be kept unless they have Deleted == true) if data.OverwriteExisting { // Generate a HashSet to quickly verify if a member is listed or not. 
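// [Editorial sketch: not part of the commit] The `OverwriteExisting` pruning kept
// below hinges on O(1) `HashSet` membership tests against the synced external ids.
// A self-contained model of the same rule (all names here are hypothetical; only the
// "never drop the last confirmed owner" guard is taken from the handler below):
use std::collections::HashSet;

/// Returns the external ids that should be removed from the organization.
fn ids_to_remove(synced: &[String], existing: &[(String, bool)]) -> Vec<String> {
    // `existing` pairs an external id with "is the last confirmed owner".
    let keep: HashSet<&str> = synced.iter().map(String::as_str).collect();
    existing
        .iter()
        .filter(|(id, last_owner)| !keep.contains(id.as_str()) && !*last_owner)
        .map(|(id, _)| id.clone())
        .collect()
}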
- let sync_members: HashSet<String> = data.Members.into_iter().map(|m| m.ExternalId).collect(); + let sync_members: HashSet<String> = + data.Members.into_iter().map(|m| m.ExternalId).collect(); for user_org in UserOrganization::find_by_org(&org_id, &mut conn).await { if let Some(ref user_external_id) = user_org.external_id { if !sync_members.contains(user_external_id) { - if user_org.atype == UserOrgType::Owner && user_org.status == UserOrgStatus::Confirmed as i32 { + if user_org.atype == UserOrgType::Owner + && user_org.status == UserOrgStatus::Confirmed as i32 + { // Removing owner, check that there is at least one other confirmed owner - if UserOrganization::count_confirmed_by_org_and_type(&org_id, UserOrgType::Owner, &mut conn) - .await + if UserOrganization::count_confirmed_by_org_and_type( + &org_id, + UserOrgType::Owner, + &mut conn, + ) + .await <= 1 { warn!("Can't delete the last owner"); diff --git a/src/api/core/sends.rs b/src/api/core/sends.rs @@ -10,7 +10,7 @@ use serde_json::Value; use crate::{ api::{ApiResult, EmptyResult, JsonResult, JsonUpcase, Notify, NumberOrString, UpdateType}, auth::{ClientIp, Headers, Host}, - db::{models::*, DbConn, DbPool}, + db::{models::*, DbConn}, util::SafeString, CONFIG, }; @@ -37,15 +37,6 @@ pub fn routes() -> Vec<rocket::Route> { ] } -pub async fn purge_sends(pool: DbPool) { - debug!("Purging sends"); - if let Ok(mut conn) = pool.get().await { - Send::purge(&mut conn).await; - } else { - error!("Failed to get DB connection while purging sends") - } -} - #[derive(Deserialize)] #[allow(non_snake_case)] struct SendData { @@ -90,7 +81,11 @@ async fn enforce_disable_send_policy(headers: &Headers, conn: &mut DbConn) -> Em /// but is allowed to remove this option from an existing Send. /// /// Ref: https://bitwarden.com/help/article/policies/#send-options -async fn enforce_disable_hide_email_policy(data: &SendData, headers: &Headers, conn: &mut DbConn) -> EmptyResult { +async fn enforce_disable_hide_email_policy( + data: &SendData, + headers: &Headers, + conn: &mut DbConn, +) -> EmptyResult { let user_uuid = &headers.user.uuid; let hide_email = data.HideEmail.unwrap_or(false); if hide_email && OrgPolicy::is_hide_email_disabled(user_uuid, conn).await { @@ -124,7 +119,13 @@ fn create_send(data: SendData, user_uuid: String) -> ApiResult<Send> { ); } - let mut send = Send::new(data.Type, data.Name, data_str, data.Key, data.DeletionDate.naive_utc()); + let mut send = Send::new( + data.Type, + data.Name, + data_str, + data.Key, + data.DeletionDate.naive_utc(), + ); send.user_uuid = Some(user_uuid); send.notes = data.Notes; send.max_access_count = match data.MaxAccessCount { @@ -168,7 +169,12 @@ async fn get_send(uuid: &str, headers: Headers, mut conn: DbConn) -> JsonResult } #[post("/sends", data = "<data>")] -async fn post_send(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { +async fn post_send( + data: JsonUpcase<SendData>, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { enforce_disable_send_policy(&headers, &mut conn).await?; let data: SendData = data.into_inner().data; @@ -207,13 +213,15 @@ struct UploadDataV2<'f> { // This method still exists to support older clients, probably need to remove it sometime. 
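// [Editorial sketch: not part of the commit] The upload handlers below gate on a
// per-user quota computed as `limit_kb * 1024 - bytes_already_used`. A worked model
// of that arithmetic (function name hypothetical; the formula is the one in the diff):
fn remaining_quota_bytes(limit_kb: i64, used_bytes: i64) -> Option<i64> {
    let left = limit_kb * 1024 - used_bytes;
    (left > 0).then_some(left) // `None` maps to the "storage limit reached" error
}

// E.g. a 100_000 KiB limit with 94_371_840 bytes (90 MiB) already stored:
// remaining_quota_bytes(100_000, 94_371_840) == Some(8_028_160)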
// Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L164-L167 #[post("/sends/file", format = "multipart/form-data", data = "<data>")] -async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { +async fn post_send_file( + data: Form<UploadData<'_>>, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { enforce_disable_send_policy(&headers, &mut conn).await?; - let UploadData { - model, - mut data, - } = data.into_inner(); + let UploadData { model, mut data } = data.into_inner(); let model = model.into_inner().data; enforce_disable_hide_email_policy(&model, &headers, &mut conn).await?; @@ -221,7 +229,8 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn: let size_limit = match CONFIG.user_attachment_limit() { Some(0) => err!("File uploads are disabled"), Some(limit_kb) => { - let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await; + let left = + (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await; if left <= 0 { err!("Attachment storage limit reached! Delete some attachments to free up space") } @@ -241,7 +250,9 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn: } let file_id = crate::crypto::generate_send_id(); - let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid); + let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()) + .await? + .join(&send.uuid); let file_path = folder_path.join(&file_id); tokio::fs::create_dir_all(&folder_path).await?; @@ -253,7 +264,10 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn: if let Some(o) = data_value.as_object_mut() { o.insert(String::from("Id"), Value::String(file_id)); o.insert(String::from("Size"), Value::Number(size.into())); - o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(size as i32))); + o.insert( + String::from("SizeName"), + Value::String(crate::util::get_display_size(size as i32)), + ); } send.data = serde_json::to_string(&data_value)?; @@ -273,7 +287,11 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn: // Upstream: https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L190 #[post("/sends/file/v2", data = "<data>")] -async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn post_send_file_v2( + data: JsonUpcase<SendData>, + headers: Headers, + mut conn: DbConn, +) -> JsonResult { enforce_disable_send_policy(&headers, &mut conn).await?; let data = data.into_inner().data; @@ -292,7 +310,8 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con let size_limit = match CONFIG.user_attachment_limit() { Some(0) => err!("File uploads are disabled"), Some(limit_kb) => { - let left = (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await; + let left = + (limit_kb * 1024) - Attachment::size_by_user(&headers.user.uuid, &mut conn).await; if left <= 0 { err!("Attachment storage limit reached! 
Delete some attachments to free up space") } @@ -312,8 +331,14 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con let mut data_value: Value = serde_json::from_str(&send.data)?; if let Some(o) = data_value.as_object_mut() { o.insert(String::from("Id"), Value::String(file_id.clone())); - o.insert(String::from("Size"), Value::Number(file_length.unwrap().into())); - o.insert(String::from("SizeName"), Value::String(crate::util::get_display_size(file_length.unwrap()))); + o.insert( + String::from("Size"), + Value::Number(file_length.unwrap().into()), + ); + o.insert( + String::from("SizeName"), + Value::String(crate::util::get_display_size(file_length.unwrap())), + ); } send.data = serde_json::to_string(&data_value)?; send.save(&mut conn).await?; @@ -327,7 +352,11 @@ async fn post_send_file_v2(data: JsonUpcase<SendData>, headers: Headers, mut con } // https://github.com/bitwarden/server/blob/d0c793c95181dfb1b447eb450f85ba0bfd7ef643/src/Api/Controllers/SendsController.cs#L243 -#[post("/sends/<send_uuid>/file/<file_id>", format = "multipart/form-data", data = "<data>")] +#[post( + "/sends/<send_uuid>/file/<file_id>", + format = "multipart/form-data", + data = "<data>" +)] async fn post_send_file_v2_data( send_uuid: &str, file_id: &str, @@ -351,7 +380,9 @@ async fn post_send_file_v2_data( err!("Send doesn't belong to user"); } - let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_uuid); + let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()) + .await? + .join(send_uuid); let file_path = folder_path.join(file_id); tokio::fs::create_dir_all(&folder_path).await?; @@ -505,7 +536,13 @@ async fn post_access_file( async fn download_send(send_id: SafeString, file_id: SafeString, t: &str) -> Option<NamedFile> { if let Ok(claims) = crate::auth::decode_send(t) { if claims.sub == format!("{send_id}/{file_id}") { - return NamedFile::open(Path::new(&CONFIG.sends_folder()).join(send_id).join(file_id)).await.ok(); + return NamedFile::open( + Path::new(&CONFIG.sends_folder()) + .join(send_id) + .join(file_id), + ) + .await + .ok(); } } None @@ -609,7 +646,12 @@ async fn delete_send(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_ } #[put("/sends/<id>/remove-password")] -async fn put_remove_password(id: &str, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult { +async fn put_remove_password( + id: &str, + headers: Headers, + mut conn: DbConn, + nt: Notify<'_>, +) -> JsonResult { enforce_disable_send_policy(&headers, &mut conn).await?; let mut send = match Send::find_by_uuid(id, &mut conn).await { diff --git a/src/api/core/two_factor/authenticator.rs b/src/api/core/two_factor/authenticator.rs @@ -3,10 +3,7 @@ use rocket::serde::json::Json; use rocket::Route; use crate::{ - api::{ - core::log_user_event, core::two_factor::_generate_recover_code, EmptyResult, JsonResult, JsonUpcase, - NumberOrString, PasswordData, - }, + api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData}, auth::{ClientIp, Headers}, crypto, db::{ @@ -18,11 +15,19 @@ use crate::{ pub use crate::config::CONFIG; pub fn routes() -> Vec<Route> { - routes![generate_authenticator, activate_authenticator, activate_authenticator_put,] + routes![ + generate_authenticator, + activate_authenticator, + activate_authenticator_put, + ] } #[post("/two-factor/get-authenticator", data = "<data>")] -async fn generate_authenticator(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn generate_authenticator( + 
data: JsonUpcase<PasswordData>, + headers: Headers, + mut conn: DbConn, +) -> JsonResult { let data: PasswordData = data.into_inner().data; let user = headers.user; @@ -63,8 +68,7 @@ async fn activate_authenticator( let password_hash = data.MasterPasswordHash; let key = data.Key; let token = data.Token.into_string(); - - let mut user = headers.user; + let user = headers.user; if !user.check_valid_password(&password_hash) { err!("Invalid password"); @@ -81,12 +85,14 @@ async fn activate_authenticator( } // Validate the token provided with the key, and save new twofactor - validate_totp_code(&user.uuid, &token, &key.to_uppercase(), &headers.ip, &mut conn).await?; - - _generate_recover_code(&mut user, &mut conn).await; - - log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await; - + validate_totp_code( + &user.uuid, + &token, + &key.to_uppercase(), + &headers.ip, + &mut conn, + ) + .await?; Ok(Json(json!({ "Enabled": true, "Key": key, @@ -131,11 +137,20 @@ pub async fn validate_totp_code( Err(_) => err!("Invalid TOTP secret"), }; - let mut twofactor = - match TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Authenticator as i32, conn).await { - Some(tf) => tf, - _ => TwoFactor::new(user_uuid.to_string(), TwoFactorType::Authenticator, secret.to_string()), - }; + let mut twofactor = match TwoFactor::find_by_user_and_type( + user_uuid, + TwoFactorType::Authenticator as i32, + conn, + ) + .await + { + Some(tf) => tf, + _ => TwoFactor::new( + user_uuid.to_string(), + TwoFactorType::Authenticator, + secret.to_string(), + ), + }; // The amount of steps back and forward in time // Also check if we need to disable time drifted TOTP codes. @@ -167,9 +182,16 @@ pub async fn validate_totp_code( twofactor.save(conn).await?; return Ok(()); } else if generated == totp_code && time_step <= i64::from(twofactor.last_used) { - warn!("This TOTP or a TOTP code within {} steps back or forward has already been used!", steps); + warn!( + "This TOTP or a TOTP code within {} steps back or forward has already been used!", + steps + ); err!( - format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip), + format!( + "Invalid TOTP code! Server time: {} IP: {}", + current_time.format("%F %T UTC"), + ip.ip + ), ErrorEvent { event: EventType::UserFailedLogIn2fa } @@ -179,7 +201,11 @@ pub async fn validate_totp_code( // Else no valid code received, deny access err!( - format!("Invalid TOTP code! Server time: {} IP: {}", current_time.format("%F %T UTC"), ip.ip), + format!( + "Invalid TOTP code! 
Server time: {} IP: {}", + current_time.format("%F %T UTC"), + ip.ip + ), ErrorEvent { event: EventType::UserFailedLogIn2fa } diff --git a/src/api/core/two_factor/duo.rs b/src/api/core/two_factor/duo.rs @@ -1,373 +0,0 @@ -use chrono::Utc; -use data_encoding::BASE64; -use rocket::serde::json::Json; -use rocket::Route; - -use crate::{ - api::{ - core::log_user_event, core::two_factor::_generate_recover_code, ApiResult, EmptyResult, JsonResult, JsonUpcase, - PasswordData, - }, - auth::Headers, - crypto, - db::{ - models::{EventType, TwoFactor, TwoFactorType, User}, - DbConn, - }, - error::MapResult, - util::get_reqwest_client, - CONFIG, -}; - -pub fn routes() -> Vec<Route> { - routes![get_duo, activate_duo, activate_duo_put,] -} - -#[derive(Serialize, Deserialize)] -struct DuoData { - host: String, // Duo API hostname - ik: String, // integration key - sk: String, // secret key -} - -impl DuoData { - fn global() -> Option<Self> { - match (CONFIG._enable_duo(), CONFIG.duo_host()) { - (true, Some(host)) => Some(Self { - host, - ik: CONFIG.duo_ikey().unwrap(), - sk: CONFIG.duo_skey().unwrap(), - }), - _ => None, - } - } - fn msg(s: &str) -> Self { - Self { - host: s.into(), - ik: s.into(), - sk: s.into(), - } - } - fn secret() -> Self { - Self::msg("<global_secret>") - } - fn obscure(self) -> Self { - let mut host = self.host; - let mut ik = self.ik; - let mut sk = self.sk; - - let digits = 4; - let replaced = "************"; - - host.replace_range(digits.., replaced); - ik.replace_range(digits.., replaced); - sk.replace_range(digits.., replaced); - - Self { - host, - ik, - sk, - } - } -} - -enum DuoStatus { - Global(DuoData), - // Using the global duo config - User(DuoData), - // Using the user's config - Disabled(bool), // True if there is a global setting -} - -impl DuoStatus { - fn data(self) -> Option<DuoData> { - match self { - DuoStatus::Global(data) => Some(data), - DuoStatus::User(data) => Some(data), - DuoStatus::Disabled(_) => None, - } - } -} - -const DISABLED_MESSAGE_DEFAULT: &str = "<To use the global Duo keys, please leave these fields untouched>"; - -#[post("/two-factor/get-duo", data = "<data>")] -async fn get_duo(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult { - let data: PasswordData = data.into_inner().data; - - if !headers.user.check_valid_password(&data.MasterPasswordHash) { - err!("Invalid password"); - } - - let data = get_user_duo_data(&headers.user.uuid, &mut conn).await; - - let (enabled, data) = match data { - DuoStatus::Global(_) => (true, Some(DuoData::secret())), - DuoStatus::User(data) => (true, Some(data.obscure())), - DuoStatus::Disabled(true) => (false, Some(DuoData::msg(DISABLED_MESSAGE_DEFAULT))), - DuoStatus::Disabled(false) => (false, None), - }; - - let json = if let Some(data) = data { - json!({ - "Enabled": enabled, - "Host": data.host, - "SecretKey": data.sk, - "IntegrationKey": data.ik, - "Object": "twoFactorDuo" - }) - } else { - json!({ - "Enabled": enabled, - "Object": "twoFactorDuo" - }) - }; - - Ok(Json(json)) -} - -#[derive(Deserialize)] -#[allow(non_snake_case, dead_code)] -struct EnableDuoData { - MasterPasswordHash: String, - Host: String, - SecretKey: String, - IntegrationKey: String, -} - -impl From<EnableDuoData> for DuoData { - fn from(d: EnableDuoData) -> Self { - Self { - host: d.Host, - ik: d.IntegrationKey, - sk: d.SecretKey, - } - } -} - -fn check_duo_fields_custom(data: &EnableDuoData) -> bool { - fn empty_or_default(s: &str) -> bool { - let st = s.trim(); - st.is_empty() || s == 
DISABLED_MESSAGE_DEFAULT - } - - !empty_or_default(&data.Host) && !empty_or_default(&data.SecretKey) && !empty_or_default(&data.IntegrationKey) -} - -#[post("/two-factor/duo", data = "<data>")] -async fn activate_duo(data: JsonUpcase<EnableDuoData>, headers: Headers, mut conn: DbConn) -> JsonResult { - let data: EnableDuoData = data.into_inner().data; - let mut user = headers.user; - - if !user.check_valid_password(&data.MasterPasswordHash) { - err!("Invalid password"); - } - - let (data, data_str) = if check_duo_fields_custom(&data) { - let data_req: DuoData = data.into(); - let data_str = serde_json::to_string(&data_req)?; - duo_api_request("GET", "/auth/v2/check", "", &data_req).await.map_res("Failed to validate Duo credentials")?; - (data_req.obscure(), data_str) - } else { - (DuoData::secret(), String::new()) - }; - - let type_ = TwoFactorType::Duo; - let twofactor = TwoFactor::new(user.uuid.clone(), type_, data_str); - twofactor.save(&mut conn).await?; - - _generate_recover_code(&mut user, &mut conn).await; - - log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await; - - Ok(Json(json!({ - "Enabled": true, - "Host": data.host, - "SecretKey": data.sk, - "IntegrationKey": data.ik, - "Object": "twoFactorDuo" - }))) -} - -#[put("/two-factor/duo", data = "<data>")] -async fn activate_duo_put(data: JsonUpcase<EnableDuoData>, headers: Headers, conn: DbConn) -> JsonResult { - activate_duo(data, headers, conn).await -} - -async fn duo_api_request(method: &str, path: &str, params: &str, data: &DuoData) -> EmptyResult { - use reqwest::{header, Method}; - use std::str::FromStr; - - // https://duo.com/docs/authapi#api-details - let url = format!("https://{}{}", &data.host, path); - let date = Utc::now().to_rfc2822(); - let username = &data.ik; - let fields = [&date, method, &data.host, path, params]; - let password = crypto::hmac_sign(&data.sk, &fields.join("\n")); - - let m = Method::from_str(method).unwrap_or_default(); - - let client = get_reqwest_client(); - - client - .request(m, &url) - .basic_auth(username, Some(password)) - .header(header::USER_AGENT, "vaultwarden:Duo/1.0 (Rust)") - .header(header::DATE, date) - .send() - .await? 
- .error_for_status()?; - - Ok(()) -} - -const DUO_EXPIRE: i64 = 300; -const APP_EXPIRE: i64 = 3600; - -const AUTH_PREFIX: &str = "AUTH"; -const DUO_PREFIX: &str = "TX"; -const APP_PREFIX: &str = "APP"; - -async fn get_user_duo_data(uuid: &str, conn: &mut DbConn) -> DuoStatus { - let type_ = TwoFactorType::Duo as i32; - - // If the user doesn't have an entry, disabled - let twofactor = match TwoFactor::find_by_user_and_type(uuid, type_, conn).await { - Some(t) => t, - None => return DuoStatus::Disabled(DuoData::global().is_some()), - }; - - // If the user has the required values, we use those - if let Ok(data) = serde_json::from_str(&twofactor.data) { - return DuoStatus::User(data); - } - - // Otherwise, we try to use the globals - if let Some(global) = DuoData::global() { - return DuoStatus::Global(global); - } - - // If there are no globals configured, just disable it - DuoStatus::Disabled(false) -} - -// let (ik, sk, ak, host) = get_duo_keys(); -async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiResult<(String, String, String, String)> { - let data = match User::find_by_mail(email, conn).await { - Some(u) => get_user_duo_data(&u.uuid, conn).await.data(), - _ => DuoData::global(), - } - .map_res("Can't fetch Duo Keys")?; - - Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host)) -} - -pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult<(String, String)> { - let now = Utc::now().timestamp(); - - let (ik, sk, ak, host) = get_duo_keys_email(email, conn).await?; - - let duo_sign = sign_duo_values(&sk, email, &ik, DUO_PREFIX, now + DUO_EXPIRE); - let app_sign = sign_duo_values(&ak, email, &ik, APP_PREFIX, now + APP_EXPIRE); - - Ok((format!("{duo_sign}:{app_sign}"), host)) -} - -fn sign_duo_values(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64) -> String { - let val = format!("{email}|{ikey}|{expire}"); - let cookie = format!("{}|{}", prefix, BASE64.encode(val.as_bytes())); - - format!("{}|{}", cookie, crypto::hmac_sign(key, &cookie)) -} - -pub async fn validate_duo_login(email: &str, response: &str, conn: &mut DbConn) -> EmptyResult { - // email is as entered by the user, so it needs to be normalized before - // comparison with auth_user below. 
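// [Editorial sketch: not part of the commit] The Duo transport removed here signs
// `email|ikey|expire`, base64-encodes it, prefixes it, and appends an HMAC over the
// prefixed cookie, i.e. `PREFIX|base64(val)|HMAC(key, PREFIX|base64(val))`. A minimal
// model of that layout; `hmac_hex` is a hypothetical stand-in for the crate's
// `crypto::hmac_sign`:
use data_encoding::BASE64;

fn duo_cookie(key: &str, email: &str, ikey: &str, prefix: &str, expire: i64) -> String {
    let val = format!("{email}|{ikey}|{expire}");
    let cookie = format!("{prefix}|{}", BASE64.encode(val.as_bytes()));
    // hmac_hex: assumed helper returning a hex-encoded HMAC tag, as in the removed code.
    format!("{cookie}|{}", hmac_hex(key, &cookie))
}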
- let email = &email.to_lowercase(); - - let split: Vec<&str> = response.split(':').collect(); - if split.len() != 2 { - err!( - "Invalid response length", - ErrorEvent { - event: EventType::UserFailedLogIn2fa - } - ); - } - - let auth_sig = split[0]; - let app_sig = split[1]; - - let now = Utc::now().timestamp(); - - let (ik, sk, ak, _host) = get_duo_keys_email(email, conn).await?; - - let auth_user = parse_duo_values(&sk, auth_sig, &ik, AUTH_PREFIX, now)?; - let app_user = parse_duo_values(&ak, app_sig, &ik, APP_PREFIX, now)?; - - if !crypto::ct_eq(&auth_user, app_user) || !crypto::ct_eq(&auth_user, email) { - err!( - "Error validating duo authentication", - ErrorEvent { - event: EventType::UserFailedLogIn2fa - } - ) - } - - Ok(()) -} - -fn parse_duo_values(key: &str, val: &str, ikey: &str, prefix: &str, time: i64) -> ApiResult<String> { - let split: Vec<&str> = val.split('|').collect(); - if split.len() != 3 { - err!("Invalid value length") - } - - let u_prefix = split[0]; - let u_b64 = split[1]; - let u_sig = split[2]; - - let sig = crypto::hmac_sign(key, &format!("{u_prefix}|{u_b64}")); - - if !crypto::ct_eq(crypto::hmac_sign(key, &sig), crypto::hmac_sign(key, u_sig)) { - err!("Duo signatures don't match") - } - - if u_prefix != prefix { - err!("Prefixes don't match") - } - - let cookie_vec = match BASE64.decode(u_b64.as_bytes()) { - Ok(c) => c, - Err(_) => err!("Invalid Duo cookie encoding"), - }; - - let cookie = match String::from_utf8(cookie_vec) { - Ok(c) => c, - Err(_) => err!("Invalid Duo cookie encoding"), - }; - - let cookie_split: Vec<&str> = cookie.split('|').collect(); - if cookie_split.len() != 3 { - err!("Invalid cookie length") - } - - let username = cookie_split[0]; - let u_ikey = cookie_split[1]; - let expire = cookie_split[2]; - - if !crypto::ct_eq(ikey, u_ikey) { - err!("Invalid ikey") - } - - let expire: i64 = match expire.parse() { - Ok(e) => e, - Err(_) => err!("Invalid expire time"), - }; - - if time >= expire { - err!("Expired authorization") - } - - Ok(username.into()) -} diff --git a/src/api/core/two_factor/email.rs b/src/api/core/two_factor/email.rs @@ -1,337 +0,0 @@ -use chrono::{Duration, NaiveDateTime, Utc}; -use rocket::serde::json::Json; -use rocket::Route; - -use crate::{ - api::{ - core::{log_user_event, two_factor::_generate_recover_code}, - EmptyResult, JsonResult, JsonUpcase, PasswordData, - }, - auth::Headers, - crypto, - db::{ - models::{EventType, TwoFactor, TwoFactorType}, - DbConn, - }, - error::{Error, MapResult}, - mail, CONFIG, -}; - -pub fn routes() -> Vec<Route> { - routes![get_email, send_email_login, send_email, email,] -} - -#[derive(Deserialize)] -#[allow(non_snake_case)] -struct SendEmailLoginData { - Email: String, - MasterPasswordHash: String, -} - -/// User is trying to login and wants to use email 2FA. -/// Does not require Bearer token -#[post("/two-factor/send-email-login", data = "<data>")] // JsonResult -async fn send_email_login(data: JsonUpcase<SendEmailLoginData>, mut conn: DbConn) -> EmptyResult { - let data: SendEmailLoginData = data.into_inner().data; - - use crate::db::models::User; - - // Get the user - let user = match User::find_by_mail(&data.Email, &mut conn).await { - Some(user) => user, - None => err!("Username or password is incorrect. Try again."), - }; - - // Check password - if !user.check_valid_password(&data.MasterPasswordHash) { - err!("Username or password is incorrect. 
Try again.") - } - - if !CONFIG._enable_email_2fa() { - err!("Email 2FA is disabled") - } - - send_token(&user.uuid, &mut conn).await?; - - Ok(()) -} - -/// Generate the token, save the data for later verification and send email to user -pub async fn send_token(user_uuid: &str, conn: &mut DbConn) -> EmptyResult { - let type_ = TwoFactorType::Email as i32; - let mut twofactor = - TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await.map_res("Two factor not found")?; - - let generated_token = crypto::generate_email_token(CONFIG.email_token_size()); - - let mut twofactor_data = EmailTokenData::from_json(&twofactor.data)?; - twofactor_data.set_token(generated_token); - twofactor.data = twofactor_data.to_json(); - twofactor.save(conn).await?; - - mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?).await?; - - Ok(()) -} - -/// When user clicks on Manage email 2FA show the user the related information -#[post("/two-factor/get-email", data = "<data>")] -async fn get_email(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult { - let data: PasswordData = data.into_inner().data; - let user = headers.user; - - if !user.check_valid_password(&data.MasterPasswordHash) { - err!("Invalid password"); - } - - let (enabled, mfa_email) = - match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::Email as i32, &mut conn).await { - Some(x) => { - let twofactor_data = EmailTokenData::from_json(&x.data)?; - (true, json!(twofactor_data.email)) - } - _ => (false, serde_json::value::Value::Null), - }; - - Ok(Json(json!({ - "Email": mfa_email, - "Enabled": enabled, - "Object": "twoFactorEmail" - }))) -} - -#[derive(Deserialize)] -#[allow(non_snake_case)] -struct SendEmailData { - /// Email where 2FA codes will be sent to, can be different than user email account. - Email: String, - MasterPasswordHash: String, -} - -/// Send a verification email to the specified email address to check whether it exists/belongs to user. -#[post("/two-factor/send-email", data = "<data>")] -async fn send_email(data: JsonUpcase<SendEmailData>, headers: Headers, mut conn: DbConn) -> EmptyResult { - let data: SendEmailData = data.into_inner().data; - let user = headers.user; - - if !user.check_valid_password(&data.MasterPasswordHash) { - err!("Invalid password"); - } - - if !CONFIG._enable_email_2fa() { - err!("Email 2FA is disabled") - } - - let type_ = TwoFactorType::Email as i32; - - if let Some(tf) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await { - tf.delete(&mut conn).await?; - } - - let generated_token = crypto::generate_email_token(CONFIG.email_token_size()); - let twofactor_data = EmailTokenData::new(data.Email, generated_token); - - // Uses EmailVerificationChallenge as type to show that it's not verified yet. - let twofactor = TwoFactor::new(user.uuid, TwoFactorType::EmailVerificationChallenge, twofactor_data.to_json()); - twofactor.save(&mut conn).await?; - - mail::send_token(&twofactor_data.email, &twofactor_data.last_token.map_res("Token is empty")?).await?; - - Ok(()) -} - -#[derive(Deserialize, Serialize)] -#[allow(non_snake_case)] -struct EmailData { - Email: String, - MasterPasswordHash: String, - Token: String, -} - -/// Verify email belongs to user and can be used for 2FA email codes. 
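// [Editorial sketch: not part of the commit] The token bookkeeping this file carried
// (set_token / reset_token / add_attempt) is a small single-use-token state machine:
// constant-time compare, count failures, invalidate after the attempt limit or on
// success. A self-contained model (all names hypothetical; `ct_eq_sketch` stands in
// for the crate's `crypto::ct_eq`):
struct IssuedToken {
    value: String,
    attempts: u8,
}

enum Verdict {
    Accepted,
    Retry,
    Invalidated,
}

fn check_token(state: &mut Option<IssuedToken>, candidate: &str, limit: u8) -> Verdict {
    let Some(mut tok) = state.take() else {
        return Verdict::Invalidated; // no token outstanding
    };
    if ct_eq_sketch(&tok.value, candidate) {
        return Verdict::Accepted; // tokens are single use; stays consumed
    }
    tok.attempts += 1;
    if tok.attempts >= limit {
        Verdict::Invalidated // too many failures: force re-issuing a token
    } else {
        *state = Some(tok); // keep counting against the same token
        Verdict::Retry
    }
}

// Byte-wise comparison without early exit; illustration only, not audited crypto.
fn ct_eq_sketch(a: &str, b: &str) -> bool {
    a.len() == b.len()
        && a.bytes().zip(b.bytes()).fold(0u8, |acc, (x, y)| acc | (x ^ y)) == 0
}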
-#[put("/two-factor/email", data = "<data>")] -async fn email(data: JsonUpcase<EmailData>, headers: Headers, mut conn: DbConn) -> JsonResult { - let data: EmailData = data.into_inner().data; - let mut user = headers.user; - - if !user.check_valid_password(&data.MasterPasswordHash) { - err!("Invalid password"); - } - - let type_ = TwoFactorType::EmailVerificationChallenge as i32; - let mut twofactor = - TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await.map_res("Two factor not found")?; - - let mut email_data = EmailTokenData::from_json(&twofactor.data)?; - - let issued_token = match &email_data.last_token { - Some(t) => t, - _ => err!("No token available"), - }; - - if !crypto::ct_eq(issued_token, data.Token) { - err!("Token is invalid") - } - - email_data.reset_token(); - twofactor.atype = TwoFactorType::Email as i32; - twofactor.data = email_data.to_json(); - twofactor.save(&mut conn).await?; - - _generate_recover_code(&mut user, &mut conn).await; - - log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await; - - Ok(Json(json!({ - "Email": email_data.email, - "Enabled": "true", - "Object": "twoFactorEmail" - }))) -} - -/// Validate the email code when used as TwoFactor token mechanism -pub async fn validate_email_code_str(user_uuid: &str, token: &str, data: &str, conn: &mut DbConn) -> EmptyResult { - let mut email_data = EmailTokenData::from_json(data)?; - let mut twofactor = TwoFactor::find_by_user_and_type(user_uuid, TwoFactorType::Email as i32, conn) - .await - .map_res("Two factor not found")?; - let issued_token = match &email_data.last_token { - Some(t) => t, - _ => err!( - "No token available", - ErrorEvent { - event: EventType::UserFailedLogIn2fa - } - ), - }; - - if !crypto::ct_eq(issued_token, token) { - email_data.add_attempt(); - if email_data.attempts >= CONFIG.email_attempts_limit() { - email_data.reset_token(); - } - twofactor.data = email_data.to_json(); - twofactor.save(conn).await?; - - err!( - "Token is invalid", - ErrorEvent { - event: EventType::UserFailedLogIn2fa - } - ) - } - - email_data.reset_token(); - twofactor.data = email_data.to_json(); - twofactor.save(conn).await?; - - let date = NaiveDateTime::from_timestamp_opt(email_data.token_sent, 0).expect("Email token timestamp invalid."); - let max_time = CONFIG.email_expiration_time() as i64; - if date + Duration::seconds(max_time) < Utc::now().naive_utc() { - err!( - "Token has expired", - ErrorEvent { - event: EventType::UserFailedLogIn2fa - } - ) - } - - Ok(()) -} - -/// Data stored in the TwoFactor table in the db -#[derive(Serialize, Deserialize)] -pub struct EmailTokenData { - /// Email address where the token will be sent to. Can be different from account email. - pub email: String, - /// Some(token): last valid token issued that has not been entered. - /// None: valid token was used and removed. - pub last_token: Option<String>, - /// UNIX timestamp of token issue. - pub token_sent: i64, - /// Amount of token entry attempts for last_token. 
- pub attempts: u64, -} - -impl EmailTokenData { - pub fn new(email: String, token: String) -> EmailTokenData { - EmailTokenData { - email, - last_token: Some(token), - token_sent: Utc::now().naive_utc().timestamp(), - attempts: 0, - } - } - - pub fn set_token(&mut self, token: String) { - self.last_token = Some(token); - self.token_sent = Utc::now().naive_utc().timestamp(); - } - - pub fn reset_token(&mut self) { - self.last_token = None; - self.attempts = 0; - } - - pub fn add_attempt(&mut self) { - self.attempts += 1; - } - - pub fn to_json(&self) -> String { - serde_json::to_string(&self).unwrap() - } - - pub fn from_json(string: &str) -> Result<EmailTokenData, Error> { - let res: Result<EmailTokenData, crate::serde_json::Error> = serde_json::from_str(string); - match res { - Ok(x) => Ok(x), - Err(_) => err!("Could not decode EmailTokenData from string"), - } - } -} - -/// Takes an email address and obscures it by replacing it with asterisks except two characters. -pub fn obscure_email(email: &str) -> String { - let split: Vec<&str> = email.rsplitn(2, '@').collect(); - - let mut name = split[1].to_string(); - let domain = &split[0]; - - let name_size = name.chars().count(); - - let new_name = match name_size { - 1..=3 => "*".repeat(name_size), - _ => { - let stars = "*".repeat(name_size - 2); - name.truncate(2); - format!("{name}{stars}") - } - }; - - format!("{}@{}", new_name, &domain) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_obscure_email_long() { - let email = "bytes@example.ext"; - - let result = obscure_email(email); - - // Only first two characters should be visible. - assert_eq!(result, "by***@example.ext"); - } - - #[test] - fn test_obscure_email_short() { - let email = "byt@example.ext"; - - let result = obscure_email(email); - - // If it's smaller than 3 characters it should only show asterisks. 
- assert_eq!(result, "***@example.ext"); - } -} diff --git a/src/api/core/two_factor/mod.rs b/src/api/core/two_factor/mod.rs @@ -1,22 +1,15 @@ -use chrono::{Duration, Utc}; -use data_encoding::BASE32; -use rocket::serde::json::Json; -use rocket::Route; -use serde_json::Value; - use crate::{ - api::{core::log_user_event, JsonResult, JsonUpcase, NumberOrString, PasswordData}, + api::{JsonResult, JsonUpcase, NumberOrString, PasswordData}, auth::{ClientHeaders, Headers}, - crypto, - db::{models::*, DbConn, DbPool}, + db::{models::*, DbConn}, mail, CONFIG, }; +use rocket::serde::json::Json; +use rocket::Route; +use serde_json::Value; pub mod authenticator; -pub mod duo; -pub mod email; pub mod webauthn; -pub mod yubikey; pub fn routes() -> Vec<Route> { let mut routes = routes![ @@ -29,10 +22,7 @@ pub fn routes() -> Vec<Route> { ]; routes.append(&mut authenticator::routes()); - routes.append(&mut duo::routes()); - routes.append(&mut email::routes()); routes.append(&mut webauthn::routes()); - routes.append(&mut yubikey::routes()); routes } @@ -73,7 +63,11 @@ struct RecoverTwoFactor { } #[post("/two-factor/recover", data = "<data>")] -async fn recover(data: JsonUpcase<RecoverTwoFactor>, client_headers: ClientHeaders, mut conn: DbConn) -> JsonResult { +async fn recover( + data: JsonUpcase<RecoverTwoFactor>, + _client_headers: ClientHeaders, + mut conn: DbConn, +) -> JsonResult { let data: RecoverTwoFactor = data.into_inner().data; use crate::db::models::User; @@ -96,30 +90,12 @@ async fn recover(data: JsonUpcase<RecoverTwoFactor>, client_headers: ClientHeade // Remove all twofactors from the user TwoFactor::delete_all_by_user(&user.uuid, &mut conn).await?; - - log_user_event( - EventType::UserRecovered2fa as i32, - &user.uuid, - client_headers.device_type, - &client_headers.ip.ip, - &mut conn, - ) - .await; - // Remove the recovery code, not needed without twofactors user.totp_recover = None; user.save(&mut conn).await?; Ok(Json(Value::Object(serde_json::Map::new()))) } -async fn _generate_recover_code(user: &mut User, conn: &mut DbConn) { - if user.totp_recover.is_none() { - let totp_recover = crypto::encode_random_bytes::<20>(BASE32); - user.totp_recover = Some(totp_recover); - user.save(conn).await.ok(); - } -} - #[derive(Deserialize)] #[allow(non_snake_case)] struct DisableTwoFactorData { @@ -128,7 +104,11 @@ struct DisableTwoFactorData { } #[post("/two-factor/disable", data = "<data>")] -async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn disable_twofactor( + data: JsonUpcase<DisableTwoFactorData>, + headers: Headers, + mut conn: DbConn, +) -> JsonResult { let data: DisableTwoFactorData = data.into_inner().data; let password_hash = data.MasterPasswordHash; let user = headers.user; @@ -141,21 +121,26 @@ async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Head if let Some(twofactor) = TwoFactor::find_by_user_and_type(&user.uuid, type_, &mut conn).await { twofactor.delete(&mut conn).await?; - log_user_event(EventType::UserDisabled2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn) - .await; } - let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn).await.is_empty(); + let twofactor_disabled = TwoFactor::find_by_user(&user.uuid, &mut conn) + .await + .is_empty(); if twofactor_disabled { - for user_org in - UserOrganization::find_by_user_and_policy(&user.uuid, OrgPolicyType::TwoFactorAuthentication, &mut conn) - .await - .into_iter() + for user_org in 
UserOrganization::find_by_user_and_policy( + &user.uuid, + OrgPolicyType::TwoFactorAuthentication, + &mut conn, + ) + .await + .into_iter() { if user_org.atype < UserOrgType::Admin { if CONFIG.mail_enabled() { - let org = Organization::find_by_uuid(&user_org.org_uuid, &mut conn).await.unwrap(); + let org = Organization::find_by_uuid(&user_org.org_uuid, &mut conn) + .await + .unwrap(); mail::send_2fa_removed_from_org(&user.email, &org.name).await?; } user_org.delete(&mut conn).await?; @@ -171,42 +156,14 @@ async fn disable_twofactor(data: JsonUpcase<DisableTwoFactorData>, headers: Head } #[put("/two-factor/disable", data = "<data>")] -async fn disable_twofactor_put(data: JsonUpcase<DisableTwoFactorData>, headers: Headers, conn: DbConn) -> JsonResult { +async fn disable_twofactor_put( + data: JsonUpcase<DisableTwoFactorData>, + headers: Headers, + conn: DbConn, +) -> JsonResult { disable_twofactor(data, headers, conn).await } -pub async fn send_incomplete_2fa_notifications(pool: DbPool) { - debug!("Sending notifications for incomplete 2FA logins"); - - if CONFIG.incomplete_2fa_time_limit() <= 0 || !CONFIG.mail_enabled() { - return; - } - - let mut conn = match pool.get().await { - Ok(conn) => conn, - _ => { - error!("Failed to get DB connection in send_incomplete_2fa_notifications()"); - return; - } - }; - - let now = Utc::now().naive_utc(); - let time_limit = Duration::minutes(CONFIG.incomplete_2fa_time_limit()); - let time_before = now - time_limit; - let incomplete_logins = TwoFactorIncomplete::find_logins_before(&time_before, &mut conn).await; - for login in incomplete_logins { - let user = User::find_by_uuid(&login.user_uuid, &mut conn).await.expect("User not found"); - info!( - "User {} did not complete a 2FA login within the configured time limit. IP: {}", - user.email, login.ip_address - ); - mail::send_incomplete_2fa_login(&user.email, &login.ip_address, &login.login_time, &login.device_name) - .await - .expect("Error sending incomplete 2FA email"); - login.delete(&mut conn).await.expect("Error deleting incomplete 2FA record"); - } -} - // This function currently is just a dummy and the actual part is not implemented yet. // This also prevents 404 errors. 
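// [Editorial sketch: not part of the commit] The org-policy cascade in
// `disable_twofactor` above reduces to one rule: once a user's last second factor is
// gone, membership in any organization enforcing the TwoFactorAuthentication policy
// is dropped unless the member is Admin or Owner (`atype < UserOrgType::Admin` in the
// handler). As a pure decision function (names hypothetical):
fn must_leave_org(has_any_2fa: bool, org_enforces_2fa: bool, admin_or_owner: bool) -> bool {
    !has_any_2fa && org_enforces_2fa && !admin_or_owner
}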
// diff --git a/src/api/core/two_factor/webauthn.rs b/src/api/core/two_factor/webauthn.rs @@ -2,13 +2,12 @@ use rocket::serde::json::Json; use rocket::Route; use serde_json::Value; use url::Url; -use webauthn_rs::{base64_data::Base64UrlSafeData, proto::*, AuthenticationState, RegistrationState, Webauthn}; +use webauthn_rs::{ + base64_data::Base64UrlSafeData, proto::*, AuthenticationState, RegistrationState, Webauthn, +}; use crate::{ - api::{ - core::{log_user_event, two_factor::_generate_recover_code}, - EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData, - }, + api::{EmptyResult, JsonResult, JsonUpcase, NumberOrString, PasswordData}, auth::Headers, db::{ models::{EventType, TwoFactor, TwoFactorType}, @@ -19,7 +18,13 @@ use crate::{ }; pub fn routes() -> Vec<Route> { - routes![get_webauthn, generate_webauthn_challenge, activate_webauthn, activate_webauthn_put, delete_webauthn,] + routes![ + get_webauthn, + generate_webauthn_challenge, + activate_webauthn, + activate_webauthn_put, + delete_webauthn, + ] } // Some old u2f structs still needed for migrating from u2f to WebAuthn @@ -55,7 +60,11 @@ impl WebauthnConfig { let domain = CONFIG.domain(); let domain_origin = CONFIG.domain_origin(); Webauthn::new(Self { - rpid: Url::parse(&domain).map(|u| u.domain().map(str::to_owned)).ok().flatten().unwrap_or_default(), + rpid: Url::parse(&domain) + .map(|u| u.domain().map(str::to_owned)) + .ok() + .flatten() + .unwrap_or_default(), url: domain, origin: Url::parse(&domain_origin).unwrap(), }) @@ -103,17 +112,28 @@ impl WebauthnRegistration { } #[post("/two-factor/get-webauthn", data = "<data>")] -async fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn get_webauthn( + data: JsonUpcase<PasswordData>, + headers: Headers, + mut conn: DbConn, +) -> JsonResult { if !CONFIG.domain_set() { err!("`DOMAIN` environment variable is not set. Webauthn disabled") } - if !headers.user.check_valid_password(&data.data.MasterPasswordHash) { + if !headers + .user + .check_valid_password(&data.data.MasterPasswordHash) + { err!("Invalid password"); } - let (enabled, registrations) = get_webauthn_registrations(&headers.user.uuid, &mut conn).await?; - let registrations_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect(); + let (enabled, registrations) = + get_webauthn_registrations(&headers.user.uuid, &mut conn).await?; + let registrations_json: Vec<Value> = registrations + .iter() + .map(WebauthnRegistration::to_json) + .collect(); Ok(Json(json!({ "Enabled": enabled, @@ -123,8 +143,15 @@ async fn get_webauthn(data: JsonUpcase<PasswordData>, headers: Headers, mut conn } #[post("/two-factor/get-webauthn-challenge", data = "<data>")] -async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult { - if !headers.user.check_valid_password(&data.data.MasterPasswordHash) { +async fn generate_webauthn_challenge( + data: JsonUpcase<PasswordData>, + headers: Headers, + mut conn: DbConn, +) -> JsonResult { + if !headers + .user + .check_valid_password(&data.data.MasterPasswordHash) + { err!("Invalid password"); } @@ -145,7 +172,9 @@ async fn generate_webauthn_challenge(data: JsonUpcase<PasswordData>, headers: He )?; let type_ = TwoFactorType::WebauthnRegisterChallenge; - TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?).save(&mut conn).await?; + TwoFactor::new(headers.user.uuid, type_, serde_json::to_string(&state)?) 
+ .save(&mut conn) + .await?; let mut challenge_value = serde_json::to_value(challenge.public_key)?; challenge_value["status"] = "ok".into(); @@ -233,19 +262,22 @@ impl From<PublicKeyCredentialCopy> for PublicKeyCredential { signature: r.Response.Signature, user_handle: r.Response.UserHandle, }, - extensions: r.Extensions.map(|e| AuthenticationExtensionsClientOutputs { - appid: e.Appid, - }), + extensions: r + .Extensions + .map(|e| AuthenticationExtensionsClientOutputs { appid: e.Appid }), type_: r.Type, } } } #[post("/two-factor/webauthn", data = "<data>")] -async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Headers, mut conn: DbConn) -> JsonResult { +async fn activate_webauthn( + data: JsonUpcase<EnableWebauthnData>, + headers: Headers, + mut conn: DbConn, +) -> JsonResult { let data: EnableWebauthnData = data.into_inner().data; - let mut user = headers.user; - + let user = headers.user; if !user.check_valid_password(&data.MasterPasswordHash) { err!("Invalid password"); } @@ -263,7 +295,8 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header // Verify the credentials with the saved state let (credential, _data) = - WebauthnConfig::load().register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?; + WebauthnConfig::load() + .register_credential(&data.DeviceResponse.into(), &state, |_| Ok(false))?; let mut registrations: Vec<_> = get_webauthn_registrations(&user.uuid, &mut conn).await?.1; // TODO: Check for repeated ID's @@ -276,14 +309,17 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header }); // Save the registrations and return them - TwoFactor::new(user.uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?) - .save(&mut conn) - .await?; - _generate_recover_code(&mut user, &mut conn).await; - - log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await; - - let keys_json: Vec<Value> = registrations.iter().map(WebauthnRegistration::to_json).collect(); + TwoFactor::new( + user.uuid.clone(), + TwoFactorType::Webauthn, + serde_json::to_string(&registrations)?, + ) + .save(&mut conn) + .await?; + let keys_json: Vec<Value> = registrations + .iter() + .map(WebauthnRegistration::to_json) + .collect(); Ok(Json(json!({ "Enabled": true, "Keys": keys_json, @@ -292,60 +328,47 @@ async fn activate_webauthn(data: JsonUpcase<EnableWebauthnData>, headers: Header } #[put("/two-factor/webauthn", data = "<data>")] -async fn activate_webauthn_put(data: JsonUpcase<EnableWebauthnData>, headers: Headers, conn: DbConn) -> JsonResult { +async fn activate_webauthn_put( + data: JsonUpcase<EnableWebauthnData>, + headers: Headers, + conn: DbConn, +) -> JsonResult { activate_webauthn(data, headers, conn).await } #[derive(Deserialize, Debug)] #[allow(non_snake_case)] struct DeleteU2FData { - Id: NumberOrString, MasterPasswordHash: String, } #[delete("/two-factor/webauthn", data = "<data>")] -async fn delete_webauthn(data: JsonUpcase<DeleteU2FData>, headers: Headers, mut conn: DbConn) -> JsonResult { - let id = data.data.Id.into_i32()?; - if !headers.user.check_valid_password(&data.data.MasterPasswordHash) { +async fn delete_webauthn( + data: JsonUpcase<DeleteU2FData>, + headers: Headers, + mut conn: DbConn, +) -> JsonResult { + if !headers + .user + .check_valid_password(&data.data.MasterPasswordHash) + { err!("Invalid password"); } - - let mut tf = - match TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::Webauthn as i32, 
&mut conn).await { - Some(tf) => tf, - None => err!("Webauthn data not found!"), - }; - - let mut data: Vec<WebauthnRegistration> = serde_json::from_str(&tf.data)?; - - let item_pos = match data.iter().position(|r| r.id == id) { - Some(p) => p, - None => err!("Webauthn entry not found"), + let mut tf = match TwoFactor::find_by_user_and_type( + &headers.user.uuid, + TwoFactorType::Webauthn as i32, + &mut conn, + ) + .await + { + Some(tf) => tf, + None => err!("Webauthn data not found!"), }; - - let removed_item = data.remove(item_pos); + let data: Vec<WebauthnRegistration> = serde_json::from_str(&tf.data)?; tf.data = serde_json::to_string(&data)?; tf.save(&mut conn).await?; drop(tf); - - // If entry is migrated from u2f, delete the u2f entry as well - if let Some(mut u2f) = - TwoFactor::find_by_user_and_type(&headers.user.uuid, TwoFactorType::U2f as i32, &mut conn).await - { - let mut data: Vec<U2FRegistration> = match serde_json::from_str(&u2f.data) { - Ok(d) => d, - Err(_) => err!("Error parsing U2F data"), - }; - - data.retain(|r| r.reg.key_handle != removed_item.credential.cred_id); - let new_data_str = serde_json::to_string(&data)?; - - u2f.data = new_data_str; - u2f.save(&mut conn).await?; - } - let keys_json: Vec<Value> = data.iter().map(WebauthnRegistration::to_json).collect(); - Ok(Json(json!({ "Enabled": true, "Keys": keys_json, @@ -366,27 +389,42 @@ pub async fn get_webauthn_registrations( pub async fn generate_webauthn_login(user_uuid: &str, conn: &mut DbConn) -> JsonResult { // Load saved credentials - let creds: Vec<Credential> = - get_webauthn_registrations(user_uuid, conn).await?.1.into_iter().map(|r| r.credential).collect(); + let creds: Vec<Credential> = get_webauthn_registrations(user_uuid, conn) + .await? + .1 + .into_iter() + .map(|r| r.credential) + .collect(); if creds.is_empty() { err!("No Webauthn devices registered") } // Generate a challenge based on the credentials - let ext = RequestAuthenticationExtensions::builder().appid(format!("{}/app-id.json", &CONFIG.domain())).build(); - let (response, state) = WebauthnConfig::load().generate_challenge_authenticate_options(creds, Some(ext))?; + let ext = RequestAuthenticationExtensions::builder() + .appid(format!("{}/app-id.json", &CONFIG.domain())) + .build(); + let (response, state) = + WebauthnConfig::load().generate_challenge_authenticate_options(creds, Some(ext))?; // Save the challenge state for later validation - TwoFactor::new(user_uuid.into(), TwoFactorType::WebauthnLoginChallenge, serde_json::to_string(&state)?) - .save(conn) - .await?; + TwoFactor::new( + user_uuid.into(), + TwoFactorType::WebauthnLoginChallenge, + serde_json::to_string(&state)?, + ) + .save(conn) + .await?; // Return challenge to the clients Ok(Json(serde_json::to_value(response.public_key)?)) } -pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut DbConn) -> EmptyResult { +pub async fn validate_webauthn_login( + user_uuid: &str, + response: &str, + conn: &mut DbConn, +) -> EmptyResult { let type_ = TwoFactorType::WebauthnLoginChallenge as i32; let state = match TwoFactor::find_by_user_and_type(user_uuid, type_, conn).await { Some(tf) => { @@ -415,9 +453,13 @@ pub async fn validate_webauthn_login(user_uuid: &str, response: &str, conn: &mut if &reg.credential.cred_id == cred_id { reg.credential.counter = auth_data.counter; - TwoFactor::new(user_uuid.to_string(), TwoFactorType::Webauthn, serde_json::to_string(&registrations)?) 
- .save(conn) - .await?; + TwoFactor::new( + user_uuid.to_string(), + TwoFactorType::Webauthn, + serde_json::to_string(&registrations)?, + ) + .save(conn) + .await?; return Ok(()); } } diff --git a/src/api/core/two_factor/yubikey.rs b/src/api/core/two_factor/yubikey.rs @@ -1,201 +0,0 @@ -use rocket::serde::json::Json; -use rocket::Route; -use serde_json::Value; -use yubico::{config::Config, verify}; - -use crate::{ - api::{ - core::{log_user_event, two_factor::_generate_recover_code}, - EmptyResult, JsonResult, JsonUpcase, PasswordData, - }, - auth::Headers, - db::{ - models::{EventType, TwoFactor, TwoFactorType}, - DbConn, - }, - error::{Error, MapResult}, - CONFIG, -}; - -pub fn routes() -> Vec<Route> { - routes![generate_yubikey, activate_yubikey, activate_yubikey_put,] -} - -#[derive(Deserialize, Debug)] -#[allow(non_snake_case)] -struct EnableYubikeyData { - MasterPasswordHash: String, - Key1: Option<String>, - Key2: Option<String>, - Key3: Option<String>, - Key4: Option<String>, - Key5: Option<String>, - Nfc: bool, -} - -#[derive(Deserialize, Serialize, Debug)] -#[allow(non_snake_case)] -pub struct YubikeyMetadata { - Keys: Vec<String>, - pub Nfc: bool, -} - -fn parse_yubikeys(data: &EnableYubikeyData) -> Vec<String> { - let data_keys = [&data.Key1, &data.Key2, &data.Key3, &data.Key4, &data.Key5]; - - data_keys.iter().filter_map(|e| e.as_ref().cloned()).collect() -} - -fn jsonify_yubikeys(yubikeys: Vec<String>) -> serde_json::Value { - let mut result = Value::Object(serde_json::Map::new()); - - for (i, key) in yubikeys.into_iter().enumerate() { - result[format!("Key{}", i + 1)] = Value::String(key); - } - - result -} - -fn get_yubico_credentials() -> Result<(String, String), Error> { - if !CONFIG._enable_yubico() { - err!("Yubico support is disabled"); - } - - match (CONFIG.yubico_client_id(), CONFIG.yubico_secret_key()) { - (Some(id), Some(secret)) => Ok((id, secret)), - _ => err!("`YUBICO_CLIENT_ID` or `YUBICO_SECRET_KEY` environment variable is not set. 
Yubikey OTP Disabled"), - } -} - -async fn verify_yubikey_otp(otp: String) -> EmptyResult { - let (yubico_id, yubico_secret) = get_yubico_credentials()?; - - let config = Config::default().set_client_id(yubico_id).set_key(yubico_secret); - - match CONFIG.yubico_server() { - Some(server) => { - tokio::task::spawn_blocking(move || verify(otp, config.set_api_hosts(vec![server]))).await.unwrap() - } - None => tokio::task::spawn_blocking(move || verify(otp, config)).await.unwrap(), - } - .map_res("Failed to verify OTP") - .and(Ok(())) -} - -#[post("/two-factor/get-yubikey", data = "<data>")] -async fn generate_yubikey(data: JsonUpcase<PasswordData>, headers: Headers, mut conn: DbConn) -> JsonResult { - // Make sure the credentials are set - get_yubico_credentials()?; - - let data: PasswordData = data.into_inner().data; - let user = headers.user; - - if !user.check_valid_password(&data.MasterPasswordHash) { - err!("Invalid password"); - } - - let user_uuid = &user.uuid; - let yubikey_type = TwoFactorType::YubiKey as i32; - - let r = TwoFactor::find_by_user_and_type(user_uuid, yubikey_type, &mut conn).await; - - if let Some(r) = r { - let yubikey_metadata: YubikeyMetadata = serde_json::from_str(&r.data)?; - - let mut result = jsonify_yubikeys(yubikey_metadata.Keys); - - result["Enabled"] = Value::Bool(true); - result["Nfc"] = Value::Bool(yubikey_metadata.Nfc); - result["Object"] = Value::String("twoFactorU2f".to_owned()); - - Ok(Json(result)) - } else { - Ok(Json(json!({ - "Enabled": false, - "Object": "twoFactorU2f", - }))) - } -} - -#[post("/two-factor/yubikey", data = "<data>")] -async fn activate_yubikey(data: JsonUpcase<EnableYubikeyData>, headers: Headers, mut conn: DbConn) -> JsonResult { - let data: EnableYubikeyData = data.into_inner().data; - let mut user = headers.user; - - if !user.check_valid_password(&data.MasterPasswordHash) { - err!("Invalid password"); - } - - // Check if we already have some data - let mut yubikey_data = - match TwoFactor::find_by_user_and_type(&user.uuid, TwoFactorType::YubiKey as i32, &mut conn).await { - Some(data) => data, - None => TwoFactor::new(user.uuid.clone(), TwoFactorType::YubiKey, String::new()), - }; - - let yubikeys = parse_yubikeys(&data); - - if yubikeys.is_empty() { - return Ok(Json(json!({ - "Enabled": false, - "Object": "twoFactorU2f", - }))); - } - - // Ensure they are valid OTPs - for yubikey in &yubikeys { - if yubikey.len() == 12 { - // YubiKey ID - continue; - } - - verify_yubikey_otp(yubikey.to_owned()).await.map_res("Invalid Yubikey OTP provided")?; - } - - let yubikey_ids: Vec<String> = yubikeys.into_iter().map(|x| (x[..12]).to_owned()).collect(); - - let yubikey_metadata = YubikeyMetadata { - Keys: yubikey_ids, - Nfc: data.Nfc, - }; - - yubikey_data.data = serde_json::to_string(&yubikey_metadata).unwrap(); - yubikey_data.save(&mut conn).await?; - - _generate_recover_code(&mut user, &mut conn).await; - - log_user_event(EventType::UserUpdated2fa as i32, &user.uuid, headers.device.atype, &headers.ip.ip, &mut conn).await; - - let mut result = jsonify_yubikeys(yubikey_metadata.Keys); - - result["Enabled"] = Value::Bool(true); - result["Nfc"] = Value::Bool(yubikey_metadata.Nfc); - result["Object"] = Value::String("twoFactorU2f".to_owned()); - - Ok(Json(result)) -} - -#[put("/two-factor/yubikey", data = "<data>")] -async fn activate_yubikey_put(data: JsonUpcase<EnableYubikeyData>, headers: Headers, conn: DbConn) -> JsonResult { - activate_yubikey(data, headers, conn).await -} - -pub async fn validate_yubikey_login(response: &str, 
twofactor_data: &str) -> EmptyResult { - if response.len() != 44 { - err!("Invalid Yubikey OTP length"); - } - - let yubikey_metadata: YubikeyMetadata = serde_json::from_str(twofactor_data).expect("Can't parse Yubikey Metadata"); - let response_id = &response[..12]; - - if !yubikey_metadata.Keys.contains(&response_id.to_owned()) { - err!("Given Yubikey is not registered"); - } - - let result = verify_yubikey_otp(response.to_owned()).await; - - match result { - Ok(_answer) => Ok(()), - Err(_e) => err!("Failed to verify Yubikey against OTP server"), - } -} diff --git a/src/api/icons.rs b/src/api/icons.rs @@ -1,986 +1,17 @@ -use std::{ - net::IpAddr, - sync::Arc, - time::{Duration, SystemTime}, -}; - -use bytes::{Bytes, BytesMut}; -use futures::{stream::StreamExt, TryFutureExt}; -use once_cell::sync::Lazy; -use regex::Regex; -use reqwest::{ - header::{self, HeaderMap, HeaderValue}, - Client, Response, -}; -use rocket::{http::ContentType, response::Redirect, Route}; -use tokio::{ - fs::{create_dir_all, remove_file, symlink_metadata, File}, - io::{AsyncReadExt, AsyncWriteExt}, - net::lookup_host, -}; - -use html5gum::{Emitter, HtmlString, InfallibleTokenizer, Readable, StringReader, Tokenizer}; - -use crate::{ - error::Error, - util::{get_reqwest_client_builder, Cached}, - CONFIG, -}; - +use crate::{util::Cached, CONFIG}; +use rocket::{http::ContentType, Route}; pub fn routes() -> Vec<Route> { - match CONFIG.icon_service().as_str() { - "internal" => routes![icon_internal], - _ => routes![icon_external], - } + routes![icon_internal] } - -static CLIENT: Lazy<Client> = Lazy::new(|| { - // Generate the default headers - let mut default_headers = HeaderMap::new(); - default_headers.insert(header::USER_AGENT, HeaderValue::from_static("Links (2.22; Linux X86_64; GNU C; text)")); - default_headers.insert(header::ACCEPT, HeaderValue::from_static("text/html, text/*;q=0.5, image/*, */*;q=0.1")); - default_headers.insert(header::ACCEPT_LANGUAGE, HeaderValue::from_static("en,*;q=0.1")); - default_headers.insert(header::CACHE_CONTROL, HeaderValue::from_static("no-cache")); - default_headers.insert(header::PRAGMA, HeaderValue::from_static("no-cache")); - - // Generate the cookie store - let cookie_store = Arc::new(Jar::default()); - - let icon_download_timeout = Duration::from_secs(CONFIG.icon_download_timeout()); - let pool_idle_timeout = Duration::from_secs(10); - // Reuse the client between requests - let client = get_reqwest_client_builder() - .cookie_provider(Arc::clone(&cookie_store)) - .timeout(icon_download_timeout) - .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections - .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds - .trust_dns(true) - .default_headers(default_headers.clone()); - - match client.build() { - Ok(client) => client, - Err(e) => { - error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'"); - get_reqwest_client_builder() - .cookie_provider(cookie_store) - .timeout(icon_download_timeout) - .pool_max_idle_per_host(5) // Configure the Hyper Pool to only have max 5 idle connections - .pool_idle_timeout(pool_idle_timeout) // Configure the Hyper Pool to timeout after 10 seconds - .trust_dns(false) - .default_headers(default_headers) - .build() - .expect("Failed to build client") - } - } -}); - -// Build Regex only once since this takes a lot of time. 
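
// Sketch (illustrative, assuming the `once_cell` and `regex` crates used
// below): compiling a Regex is costly, so it is built once behind a Lazy
// static and shared by every request thereafter. In miniature:
use once_cell::sync::Lazy;
use regex::Regex;

// Compiled on first access; later accesses reuse the same compiled program.
static SIZE_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"(\d+)\D*(\d+)").unwrap());

// Extracts "WxH" style dimensions, e.g. dims("32x32") == Some((32, 32)).
fn dims(sizes: &str) -> Option<(u16, u16)> {
    let caps = SIZE_RE.captures(sizes.trim())?;
    Some((caps[1].parse().ok()?, caps[2].parse().ok()?))
}
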
-static ICON_SIZE_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"(?x)(\d+)\D*(\d+)").unwrap()); - -// Special HashMap which holds the user defined Regex to speedup matching the regex. -static ICON_BLACKLIST_REGEX: Lazy<dashmap::DashMap<String, Regex>> = Lazy::new(dashmap::DashMap::new); - -async fn icon_redirect(domain: &str, template: &str) -> Option<Redirect> { - if !is_valid_domain(domain) { - warn!("Invalid domain: {}", domain); - return None; - } - - if check_domain_blacklist_reason(domain).await.is_some() { - return None; - } - - let url = template.replace("{}", domain); - match CONFIG.icon_redirect_code() { - 301 => Some(Redirect::moved(url)), // legacy permanent redirect - 302 => Some(Redirect::found(url)), // legacy temporary redirect - 307 => Some(Redirect::temporary(url)), - 308 => Some(Redirect::permanent(url)), - _ => { - error!("Unexpected redirect code {}", CONFIG.icon_redirect_code()); - None - } - } -} - -#[get("/<domain>/icon.png")] -async fn icon_external(domain: &str) -> Option<Redirect> { - icon_redirect(domain, &CONFIG._icon_service_url()).await -} - +#[allow(unused_variables)] #[get("/<domain>/icon.png")] -async fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> { - const FALLBACK_ICON: &[u8] = include_bytes!("../static/images/fallback-icon.png"); - - if !is_valid_domain(domain) { - warn!("Invalid domain: {}", domain); - return Cached::ttl( - (ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), - CONFIG.icon_cache_negttl(), - true, - ); - } - - match get_icon(domain).await { - Some((icon, icon_type)) => { - Cached::ttl((ContentType::new("image", icon_type), icon), CONFIG.icon_cache_ttl(), true) - } - _ => Cached::ttl((ContentType::new("image", "png"), FALLBACK_ICON.to_vec()), CONFIG.icon_cache_negttl(), true), - } -} - -/// Returns if the domain provided is valid or not. -/// -/// This does some manual checks and makes use of Url to do some basic checking. -/// domains can't be larger then 63 characters (not counting multiple subdomains) according to the RFC's, but we limit the total size to 255. -fn is_valid_domain(domain: &str) -> bool { - const ALLOWED_CHARS: &str = "_-."; - - // If parsing the domain fails using Url, it will not work with reqwest. - if let Err(parse_error) = url::Url::parse(format!("https://{domain}").as_str()) { - debug!("Domain parse error: '{}' - {:?}", domain, parse_error); - return false; - } else if domain.is_empty() - || domain.contains("..") - || domain.starts_with('.') - || domain.starts_with('-') - || domain.ends_with('-') - { - debug!( - "Domain validation error: '{}' is either empty, contains '..', starts with an '.', starts or ends with a '-'", - domain - ); - return false; - } else if domain.len() > 255 { - debug!("Domain validation error: '{}' exceeds 255 characters", domain); - return false; - } - - for c in domain.chars() { - if !c.is_alphanumeric() && !ALLOWED_CHARS.contains(c) { - debug!("Domain validation error: '{}' contains an invalid character '{}'", domain, c); - return false; - } - } - - true -} - -/// TODO: This is extracted from IpAddr::is_global, which is unstable: -/// https://doc.rust-lang.org/nightly/std/net/enum.IpAddr.html#method.is_global -/// Remove once https://github.com/rust-lang/rust/issues/27709 is merged -#[allow(clippy::nonminimal_bool)] -#[cfg(not(feature = "unstable"))] -fn is_global(ip: IpAddr) -> bool { - match ip { - IpAddr::V4(ip) => { - // check if this address is 192.0.0.9 or 192.0.0.10. 
These addresses are the only two - // globally routable addresses in the 192.0.0.0/24 range. - if u32::from(ip) == 0xc0000009 || u32::from(ip) == 0xc000000a { - return true; - } - !ip.is_private() - && !ip.is_loopback() - && !ip.is_link_local() - && !ip.is_broadcast() - && !ip.is_documentation() - && !(ip.octets()[0] == 100 && (ip.octets()[1] & 0b1100_0000 == 0b0100_0000)) - && !(ip.octets()[0] == 192 && ip.octets()[1] == 0 && ip.octets()[2] == 0) - && !(ip.octets()[0] & 240 == 240 && !ip.is_broadcast()) - && !(ip.octets()[0] == 198 && (ip.octets()[1] & 0xfe) == 18) - // Make sure the address is not in 0.0.0.0/8 - && ip.octets()[0] != 0 - } - IpAddr::V6(ip) => { - if ip.is_multicast() && ip.segments()[0] & 0x000f == 14 { - true - } else { - !ip.is_multicast() - && !ip.is_loopback() - && !((ip.segments()[0] & 0xffc0) == 0xfe80) - && !((ip.segments()[0] & 0xfe00) == 0xfc00) - && !ip.is_unspecified() - && !((ip.segments()[0] == 0x2001) && (ip.segments()[1] == 0xdb8)) - } - } - } -} - -#[cfg(feature = "unstable")] -fn is_global(ip: IpAddr) -> bool { - ip.is_global() -} - -/// These are some tests to check that the implementations match -/// The IPv4 can be all checked in 5 mins or so and they are correct as of nightly 2020-07-11 -/// The IPV6 can't be checked in a reasonable time, so we check about ten billion random ones, so far correct -/// Note that the is_global implementation is subject to change as new IP RFCs are created -/// -/// To run while showing progress output: -/// cargo test --features sqlite,unstable -- --nocapture --ignored -#[cfg(test)] -#[cfg(feature = "unstable")] -mod tests { - use super::*; - - #[test] - #[ignore] - fn test_ipv4_global() { - for a in 0..u8::MAX { - println!("Iter: {}/255", a); - for b in 0..u8::MAX { - for c in 0..u8::MAX { - for d in 0..u8::MAX { - let ip = IpAddr::V4(std::net::Ipv4Addr::new(a, b, c, d)); - assert_eq!(ip.is_global(), is_global(ip)) - } - } - } - } - } - - #[test] - #[ignore] - fn test_ipv6_global() { - use ring::rand::{SecureRandom, SystemRandom}; - let mut v = [0u8; 16]; - let rand = SystemRandom::new(); - for i in 0..1_000 { - println!("Iter: {}/1_000", i); - for _ in 0..10_000_000 { - rand.fill(&mut v).expect("Error generating random values"); - let ip = IpAddr::V6(std::net::Ipv6Addr::new( - (v[14] as u16) << 8 | v[15] as u16, - (v[12] as u16) << 8 | v[13] as u16, - (v[10] as u16) << 8 | v[11] as u16, - (v[8] as u16) << 8 | v[9] as u16, - (v[6] as u16) << 8 | v[7] as u16, - (v[4] as u16) << 8 | v[5] as u16, - (v[2] as u16) << 8 | v[3] as u16, - (v[0] as u16) << 8 | v[1] as u16, - )); - assert_eq!(ip.is_global(), is_global(ip)) - } - } - } -} - -#[derive(Clone)] -enum DomainBlacklistReason { - Regex, - IP, -} - -use cached::proc_macro::cached; -#[cached(key = "String", convert = r#"{ domain.to_string() }"#, size = 16, time = 60)] -async fn check_domain_blacklist_reason(domain: &str) -> Option<DomainBlacklistReason> { - // First check the blacklist regex if there is a match. - // This prevents the blocked domain(s) from being leaked via a DNS lookup. - if let Some(blacklist) = CONFIG.icon_blacklist_regex() { - // Use the pre-generate Regex stored in a Lazy HashMap if there's one, else generate it. - let is_match = if let Some(regex) = ICON_BLACKLIST_REGEX.get(&blacklist) { - regex.is_match(domain) - } else { - // Clear the current list if the previous key doesn't exists. - // To prevent growing of the HashMap after someone has changed it via the admin interface. 
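
// Sketch (illustrative): ICON_BLACKLIST_REGEX is used as a one-entry memo
// for the admin-configured blacklist pattern, so the Regex is recompiled
// only when the setting changes. The same idea, self-contained and assuming
// the `dashmap` and `regex` crates:
use dashmap::DashMap;
use regex::Regex;

fn is_blacklisted(memo: &DashMap<String, Regex>, pattern: &str, domain: &str) -> bool {
    if let Some(re) = memo.get(pattern) {
        return re.is_match(domain);
    }
    // The pattern changed (or was never compiled): drop stale entries so the
    // map cannot grow without bound, then compile and remember the new one.
    memo.clear();
    let re = Regex::new(pattern).expect("invalid blacklist regex");
    let hit = re.is_match(domain);
    memo.insert(pattern.to_owned(), re);
    hit
}
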
- if ICON_BLACKLIST_REGEX.len() >= 1 { - ICON_BLACKLIST_REGEX.clear(); - } - - // Generate the regex to store in too the Lazy Static HashMap. - let blacklist_regex = Regex::new(&blacklist).unwrap(); - let is_match = blacklist_regex.is_match(domain); - ICON_BLACKLIST_REGEX.insert(blacklist.clone(), blacklist_regex); - - is_match - }; - - if is_match { - debug!("Blacklisted domain: {} matched ICON_BLACKLIST_REGEX", domain); - return Some(DomainBlacklistReason::Regex); - } - } - - if CONFIG.icon_blacklist_non_global_ips() { - if let Ok(s) = lookup_host((domain, 0)).await { - for addr in s { - if !is_global(addr.ip()) { - debug!("IP {} for domain '{}' is not a global IP!", addr.ip(), domain); - return Some(DomainBlacklistReason::IP); - } - } - } - } - - None -} - -async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> { - let path = format!("{}/{}.png", CONFIG.icon_cache_folder(), domain); - - // Check for expiration of negatively cached copy - if icon_is_negcached(&path).await { - return None; - } - - if let Some(icon) = get_cached_icon(&path).await { - let icon_type = match get_icon_type(&icon) { - Some(x) => x, - _ => "x-icon", - }; - return Some((icon, icon_type.to_string())); - } - - if CONFIG.disable_icon_download() { - return None; - } - - // Get the icon, or None in case of error - match download_icon(domain).await { - Ok((icon, icon_type)) => { - save_icon(&path, &icon).await; - Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string())) - } - Err(e) => { - warn!("Unable to download icon: {:?}", e); - let miss_indicator = path + ".miss"; - save_icon(&miss_indicator, &[]).await; - None - } - } -} - -async fn get_cached_icon(path: &str) -> Option<Vec<u8>> { - // Check for expiration of successfully cached copy - if icon_is_expired(path).await { - return None; - } - - // Try to read the cached icon, and return it if it exists - if let Ok(mut f) = File::open(path).await { - let mut buffer = Vec::new(); - - if f.read_to_end(&mut buffer).await.is_ok() { - return Some(buffer); - } - } - - None -} - -async fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Error> { - let meta = symlink_metadata(path).await?; - let modified = meta.modified()?; - let age = SystemTime::now().duration_since(modified)?; - - Ok(ttl > 0 && ttl <= age.as_secs()) -} - -async fn icon_is_negcached(path: &str) -> bool { - let miss_indicator = path.to_owned() + ".miss"; - let expired = file_is_expired(&miss_indicator, CONFIG.icon_cache_negttl()).await; - - match expired { - // No longer negatively cached, drop the marker - Ok(true) => { - if let Err(e) = remove_file(&miss_indicator).await { - error!("Could not remove negative cache indicator for icon {:?}: {:?}", path, e); - } - false - } - // The marker hasn't expired yet. - Ok(false) => true, - // The marker is missing or inaccessible in some way. 
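
// Sketch (illustrative): icon_is_negcached() implements negative caching
// with an empty "<icon path>.miss" marker file; the marker's mtime plus the
// icon_cache_negttl() setting decides whether a failed lookup is still
// fresh. The freshness test, using only std:
use std::{fs, time::SystemTime};

fn miss_is_fresh(miss_path: &str, neg_ttl_secs: u64) -> bool {
    let Ok(meta) = fs::symlink_metadata(miss_path) else {
        return false; // no marker (or unreadable): not negatively cached
    };
    let Ok(modified) = meta.modified() else { return false };
    match SystemTime::now().duration_since(modified) {
        Ok(age) => age.as_secs() < neg_ttl_secs,
        Err(_) => true, // mtime lies in the future; treat the marker as fresh
    }
}
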
- Err(_) => false, - } -} - -async fn icon_is_expired(path: &str) -> bool { - let expired = file_is_expired(path, CONFIG.icon_cache_ttl()).await; - expired.unwrap_or(true) -} - -struct Icon { - priority: u8, - href: String, -} - -impl Icon { - const fn new(priority: u8, href: String) -> Self { - Self { - priority, - href, - } - } -} - -fn get_favicons_node( - dom: InfallibleTokenizer<StringReader<'_>, FaviconEmitter>, - icons: &mut Vec<Icon>, - url: &url::Url, -) { - const TAG_LINK: &[u8] = b"link"; - const TAG_BASE: &[u8] = b"base"; - const TAG_HEAD: &[u8] = b"head"; - const ATTR_HREF: &[u8] = b"href"; - const ATTR_SIZES: &[u8] = b"sizes"; - - let mut base_url = url.clone(); - let mut icon_tags: Vec<Tag> = Vec::new(); - for token in dom { - let tag_name: &[u8] = &token.tag.name; - match tag_name { - TAG_LINK => { - icon_tags.push(token.tag); - } - TAG_BASE => { - base_url = if let Some(href) = token.tag.attributes.get(ATTR_HREF) { - let href = std::str::from_utf8(href).unwrap_or_default(); - debug!("Found base href: {href}"); - match base_url.join(href) { - Ok(inner_url) => inner_url, - _ => continue, - } - } else { - continue; - }; - } - TAG_HEAD if token.closing => { - break; - } - _ => { - continue; - } - } - } - - for icon_tag in icon_tags { - if let Some(icon_href) = icon_tag.attributes.get(ATTR_HREF) { - if let Ok(full_href) = base_url.join(std::str::from_utf8(icon_href).unwrap_or_default()) { - let sizes = if let Some(v) = icon_tag.attributes.get(ATTR_SIZES) { - std::str::from_utf8(v).unwrap_or_default() - } else { - "" - }; - let priority = get_icon_priority(full_href.as_str(), sizes); - icons.push(Icon::new(priority, full_href.to_string())); - } - }; - } -} - -struct IconUrlResult { - iconlist: Vec<Icon>, - referer: String, -} - -/// Returns a IconUrlResult which holds a Vector IconList and a string which holds the referer. -/// There will always two items within the iconlist which holds http(s)://domain.tld/favicon.ico. -/// This does not mean that that location does exists, but it is the default location browser use. -/// -/// # Argument -/// * `domain` - A string which holds the domain with extension. -/// -/// # Example -/// ``` -/// let icon_result = get_icon_url("github.com").await?; -/// let icon_result = get_icon_url("vaultwarden.discourse.group").await?; -/// ``` -async fn get_icon_url(domain: &str) -> Result<IconUrlResult, Error> { - // Default URL with secure and insecure schemes - let ssldomain = format!("https://{domain}"); - let httpdomain = format!("http://{domain}"); - - // First check the domain as given during the request for both HTTPS and HTTP. - let resp = match get_page(&ssldomain).or_else(|_| get_page(&httpdomain)).await { - Ok(c) => Ok(c), - Err(e) => { - let mut sub_resp = Err(e); - - // When the domain is not an IP, and has more then one dot, remove all subdomains. - let is_ip = domain.parse::<IpAddr>(); - if is_ip.is_err() && domain.matches('.').count() > 1 { - let mut domain_parts = domain.split('.'); - let base_domain = format!( - "{base}.{tld}", - tld = domain_parts.next_back().unwrap(), - base = domain_parts.next_back().unwrap() - ); - if is_valid_domain(&base_domain) { - let sslbase = format!("https://{base_domain}"); - let httpbase = format!("http://{base_domain}"); - debug!("[get_icon_url]: Trying without subdomains '{base_domain}'"); - - sub_resp = get_page(&sslbase).or_else(|_| get_page(&httpbase)).await; - } - - // When the domain is not an IP, and has less then 2 dots, try to add www. infront of it. 
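
// Sketch (illustrative): the two branches here derive one alternate host to
// probe when the requested domain serves nothing: strip subdomains when the
// name has more than one dot, otherwise prepend "www.". Condensed:
use std::net::IpAddr;

fn fallback_host(domain: &str) -> Option<String> {
    if domain.parse::<IpAddr>().is_ok() {
        return None; // IP literals get no fallback
    }
    if domain.matches('.').count() > 1 {
        // "icons.example.com" -> "example.com"
        let mut labels = domain.rsplitn(3, '.');
        let tld = labels.next()?;
        let base = labels.next()?;
        Some(format!("{base}.{tld}"))
    } else {
        // "example.com" -> "www.example.com"
        Some(format!("www.{domain}"))
    }
}
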
- } else if is_ip.is_err() && domain.matches('.').count() < 2 { - let www_domain = format!("www.{domain}"); - if is_valid_domain(&www_domain) { - let sslwww = format!("https://{www_domain}"); - let httpwww = format!("http://{www_domain}"); - debug!("[get_icon_url]: Trying with www. prefix '{www_domain}'"); - - sub_resp = get_page(&sslwww).or_else(|_| get_page(&httpwww)).await; - } - } - sub_resp - } - }; - - // Create the iconlist - let mut iconlist: Vec<Icon> = Vec::new(); - let mut referer = String::new(); - - if let Ok(content) = resp { - // Extract the URL from the response in case redirects occurred (like @ gitlab.com) - let url = content.url().clone(); - - // Set the referer to be used on the final request, some sites check this. - // Mostly used to prevent direct linking and other security reasons. - referer = url.to_string(); - - // Add the fallback favicon.ico and apple-touch-icon.png to the list with the domain the content responded from. - iconlist.push(Icon::new(35, String::from(url.join("/favicon.ico").unwrap()))); - iconlist.push(Icon::new(40, String::from(url.join("/apple-touch-icon.png").unwrap()))); - - // 384KB should be more than enough for the HTML, though as we only really need the HTML header. - let limited_reader = stream_to_bytes_limit(content, 384 * 1024).await?.to_vec(); - - let dom = Tokenizer::new_with_emitter(limited_reader.to_reader(), FaviconEmitter::default()).infallible(); - get_favicons_node(dom, &mut iconlist, &url); - } else { - // Add the default favicon.ico to the list with just the given domain - iconlist.push(Icon::new(35, format!("{ssldomain}/favicon.ico"))); - iconlist.push(Icon::new(40, format!("{ssldomain}/apple-touch-icon.png"))); - iconlist.push(Icon::new(35, format!("{httpdomain}/favicon.ico"))); - iconlist.push(Icon::new(40, format!("{httpdomain}/apple-touch-icon.png"))); - } - - // Sort the iconlist by priority - iconlist.sort_by_key(|x| x.priority); - - // There always is an icon in the list, so no need to check if it exists, and just return the first one - Ok(IconUrlResult { - iconlist, - referer, - }) -} - -async fn get_page(url: &str) -> Result<Response, Error> { - get_page_with_referer(url, "").await -} - -async fn get_page_with_referer(url: &str, referer: &str) -> Result<Response, Error> { - match check_domain_blacklist_reason(url::Url::parse(url).unwrap().host_str().unwrap_or_default()).await { - Some(DomainBlacklistReason::Regex) => warn!("Favicon '{}' is from a blacklisted domain!", url), - Some(DomainBlacklistReason::IP) => warn!("Favicon '{}' is hosted on a non-global IP!", url), - None => (), - } - - let mut client = CLIENT.get(url); - if !referer.is_empty() { - client = client.header("Referer", referer) - } - - match client.send().await { - Ok(c) => c.error_for_status().map_err(Into::into), - Err(e) => err_silent!(format!("{e}")), - } -} - -/// Returns a Integer with the priority of the type of the icon which to prefer. -/// The lower the number the better. -/// -/// # Arguments -/// * `href` - A string which holds the href value or relative path. -/// * `sizes` - The size of the icon if available as a <width>x<height> value like 32x32. 
-/// -/// # Example -/// ``` -/// priority1 = get_icon_priority("http://example.com/path/to/a/favicon.png", "32x32"); -/// priority2 = get_icon_priority("https://example.com/path/to/a/favicon.ico", ""); -/// ``` -fn get_icon_priority(href: &str, sizes: &str) -> u8 { - // Check if there is a dimension set - let (width, height) = parse_sizes(sizes); - - // Check if there is a size given - if width != 0 && height != 0 { - // Only allow square dimensions - if width == height { - // Change priority by given size - if width == 32 { - 1 - } else if width == 64 { - 2 - } else if (24..=192).contains(&width) { - 3 - } else if width == 16 { - 4 - } else { - 5 - } - // There are dimensions available, but the image is not a square - } else { - 200 - } - } else { - // Change priority by file extension - if href.ends_with(".png") { - 10 - } else if href.ends_with(".jpg") || href.ends_with(".jpeg") { - 20 - } else { - 30 - } - } -} - -/// Returns a Tuple with the width and height as a separate value extracted from the sizes attribute -/// It will return 0 for both values if no match has been found. -/// -/// # Arguments -/// * `sizes` - The size of the icon if available as a <width>x<height> value like 32x32. -/// -/// # Example -/// ``` -/// let (width, height) = parse_sizes("64x64"); // (64, 64) -/// let (width, height) = parse_sizes("x128x128"); // (128, 128) -/// let (width, height) = parse_sizes("32"); // (0, 0) -/// ``` -fn parse_sizes(sizes: &str) -> (u16, u16) { - let mut width: u16 = 0; - let mut height: u16 = 0; - - if !sizes.is_empty() { - match ICON_SIZE_REGEX.captures(sizes.trim()) { - None => {} - Some(dimensions) => { - if dimensions.len() >= 3 { - width = dimensions[1].parse::<u16>().unwrap_or_default(); - height = dimensions[2].parse::<u16>().unwrap_or_default(); - } - } - } - } - - (width, height) -} - -async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> { - match check_domain_blacklist_reason(domain).await { - Some(DomainBlacklistReason::Regex) => err_silent!("Domain is blacklisted", domain), - Some(DomainBlacklistReason::IP) => err_silent!("Host resolves to a non-global IP", domain), - None => (), - } - - let icon_result = get_icon_url(domain).await?; - - let mut buffer = Bytes::new(); - let mut icon_type: Option<&str> = None; - - use data_url::DataUrl; - - for icon in icon_result.iconlist.iter().take(5) { - if icon.href.starts_with("data:image") { - let Ok(datauri) = DataUrl::process(&icon.href) else { - continue; - }; - // Check if we are able to decode the data uri - let mut body = BytesMut::new(); - match datauri.decode::<_, ()>(|bytes| { - body.extend_from_slice(bytes); - Ok(()) - }) { - Ok(_) => { - // Also check if the size is atleast 67 bytes, which seems to be the smallest png i could create - if body.len() >= 67 { - // Check if the icon type is allowed, else try an icon from the list. - icon_type = get_icon_type(&body); - if icon_type.is_none() { - debug!("Icon from {} data:image uri, is not a valid image type", domain); - continue; - } - info!("Extracted icon from data:image uri for {}", domain); - buffer = body.freeze(); - break; - } - } - _ => debug!("Extracted icon from data:image uri is invalid"), - }; - } else { - match get_page_with_referer(&icon.href, &icon_result.referer).await { - Ok(res) => { - buffer = stream_to_bytes_limit(res, 5120 * 1024).await?; // 5120KB/5MB for each icon max (Same as icons.bitwarden.net) - - // Check if the icon type is allowed, else try an icon from the list. 
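
// Sketch (illustrative): get_icon_type (defined later in this file) decides
// the served format by sniffing magic bytes rather than trusting a URL
// extension or Content-Type header. The same check in miniature:
fn sniff_image(bytes: &[u8]) -> Option<&'static str> {
    match bytes {
        [0x89, b'P', b'N', b'G', ..] => Some("png"), // 137 80 78 71
        [0xFF, 0xD8, 0xFF, ..] => Some("jpeg"),
        [b'G', b'I', b'F', b'8', ..] => Some("gif"),
        [0x00, 0x00, 0x01, 0x00, ..] => Some("x-icon"),
        _ => None, // unknown type: the caller skips to the next candidate
    }
}
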
- icon_type = get_icon_type(&buffer); - if icon_type.is_none() { - buffer.clear(); - debug!("Icon from {}, is not a valid image type", icon.href); - continue; - } - info!("Downloaded icon from {}", icon.href); - break; - } - Err(e) => debug!("{:?}", e), - }; - } - } - - if buffer.is_empty() { - err_silent!("Empty response or unable find a valid icon", domain); - } - - Ok((buffer, icon_type)) -} - -async fn save_icon(path: &str, icon: &[u8]) { - match File::create(path).await { - Ok(mut f) => { - f.write_all(icon).await.expect("Error writing icon file"); - } - Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => { - create_dir_all(&CONFIG.icon_cache_folder()).await.expect("Error creating icon cache folder"); - } - Err(e) => { - warn!("Unable to save icon: {:?}", e); - } - } -} - -fn get_icon_type(bytes: &[u8]) -> Option<&'static str> { - match bytes { - [137, 80, 78, 71, ..] => Some("png"), - [0, 0, 1, 0, ..] => Some("x-icon"), - [82, 73, 70, 70, ..] => Some("webp"), - [255, 216, 255, ..] => Some("jpeg"), - [71, 73, 70, 56, ..] => Some("gif"), - [66, 77, ..] => Some("bmp"), - _ => None, - } -} - -/// Minimize the amount of bytes to be parsed from a reqwest result. -/// This prevents very long parsing and memory usage. -async fn stream_to_bytes_limit(res: Response, max_size: usize) -> Result<Bytes, reqwest::Error> { - let mut stream = res.bytes_stream().take(max_size); - let mut buf = BytesMut::new(); - let mut size = 0; - while let Some(chunk) = stream.next().await { - let chunk = &chunk?; - size += chunk.len(); - buf.extend(chunk); - if size >= max_size { - break; - } - } - Ok(buf.freeze()) -} - -/// This is an implementation of the default Cookie Jar from Reqwest and reqwest_cookie_store build by pfernie. -/// The default cookie jar used by Reqwest keeps all the cookies based upon the Max-Age or Expires which could be a long time. -/// That could be used for tracking, to prevent this we force the lifespan of the cookies to always be max two minutes. -/// A Cookie Jar is needed because some sites force a redirect with cookies to verify if a request uses cookies or not. -use cookie_store::CookieStore; -#[derive(Default)] -pub struct Jar(std::sync::RwLock<CookieStore>); - -impl reqwest::cookie::CookieStore for Jar { - fn set_cookies(&self, cookie_headers: &mut dyn Iterator<Item = &header::HeaderValue>, url: &url::Url) { - use cookie::{Cookie as RawCookie, ParseError as RawCookieParseError}; - use time::Duration; - - let mut cookie_store = self.0.write().unwrap(); - let cookies = cookie_headers.filter_map(|val| { - std::str::from_utf8(val.as_bytes()) - .map_err(RawCookieParseError::from) - .and_then(RawCookie::parse) - .map(|mut c| { - c.set_expires(None); - c.set_max_age(Some(Duration::minutes(2))); - c.into_owned() - }) - .ok() - }); - cookie_store.store_response_cookies(cookies, url); - } - - fn cookies(&self, url: &url::Url) -> Option<header::HeaderValue> { - let cookie_store = self.0.read().unwrap(); - let s = cookie_store - .get_request_values(url) - .map(|(name, value)| format!("{name}={value}")) - .collect::<Vec<_>>() - .join("; "); - - if s.is_empty() { - return None; - } - - header::HeaderValue::from_maybe_shared(Bytes::from(s)).ok() - } -} - -/// Custom FaviconEmitter for the html5gum parser. -/// The FaviconEmitter is using an optimized version of the DefaultEmitter. -/// This prevents emitting tags like comments, doctype and also strings between the tags. 
-/// But it will also only emit the tags we need and only if they have the correct attributes -/// Therefor parsing the HTML content is faster. -use std::collections::BTreeMap; - -#[derive(Default)] -pub struct Tag { - /// The tag's name, such as `"link"` or `"base"`. - pub name: HtmlString, - - /// A mapping for any HTML attributes this start tag may have. - /// - /// Duplicate attributes are ignored after the first one as per WHATWG spec. - pub attributes: BTreeMap<HtmlString, HtmlString>, -} - -struct FaviconToken { - tag: Tag, - closing: bool, -} - -#[derive(Default)] -struct FaviconEmitter { - current_token: Option<FaviconToken>, - last_start_tag: HtmlString, - current_attribute: Option<(HtmlString, HtmlString)>, - emit_token: bool, -} - -impl FaviconEmitter { - fn flush_current_attribute(&mut self, emit_current_tag: bool) { - const ATTR_HREF: &[u8] = b"href"; - const ATTR_REL: &[u8] = b"rel"; - const TAG_LINK: &[u8] = b"link"; - const TAG_BASE: &[u8] = b"base"; - const TAG_HEAD: &[u8] = b"head"; - - if let Some(ref mut token) = self.current_token { - let tag_name: &[u8] = &token.tag.name; - - if self.current_attribute.is_some() && (tag_name == TAG_BASE || tag_name == TAG_LINK) { - let (k, v) = self.current_attribute.take().unwrap(); - token.tag.attributes.entry(k).and_modify(|_| {}).or_insert(v); - } - - let tag_attr = &token.tag.attributes; - match tag_name { - TAG_HEAD if token.closing => self.emit_token = true, - TAG_BASE if tag_attr.contains_key(ATTR_HREF) => self.emit_token = true, - TAG_LINK if emit_current_tag && tag_attr.contains_key(ATTR_REL) && tag_attr.contains_key(ATTR_HREF) => { - let rel_value = - std::str::from_utf8(token.tag.attributes.get(ATTR_REL).unwrap()).unwrap_or_default(); - if rel_value.contains("icon") && !rel_value.contains("mask-icon") { - self.emit_token = true - } - } - _ => (), - } - } - } -} - -impl Emitter for FaviconEmitter { - type Token = FaviconToken; - - fn set_last_start_tag(&mut self, last_start_tag: Option<&[u8]>) { - self.last_start_tag.clear(); - self.last_start_tag.extend(last_start_tag.unwrap_or_default()); - } - - fn pop_token(&mut self) -> Option<Self::Token> { - if self.emit_token { - self.emit_token = false; - return self.current_token.take(); - } - None - } - - fn init_start_tag(&mut self) { - self.current_token = Some(FaviconToken { - tag: Tag::default(), - closing: false, - }); - } - - fn init_end_tag(&mut self) { - self.current_token = Some(FaviconToken { - tag: Tag::default(), - closing: true, - }); - } - - fn emit_current_tag(&mut self) -> Option<html5gum::State> { - self.flush_current_attribute(true); - self.last_start_tag.clear(); - if self.current_token.is_some() && !self.current_token.as_ref().unwrap().closing { - self.last_start_tag.extend(&*self.current_token.as_ref().unwrap().tag.name); - } - html5gum::naive_next_state(&self.last_start_tag) - } - - fn push_tag_name(&mut self, s: &[u8]) { - if let Some(ref mut token) = self.current_token { - token.tag.name.extend(s); - } - } - - fn init_attribute(&mut self) { - self.flush_current_attribute(false); - self.current_attribute = match &self.current_token { - Some(token) => { - let tag_name: &[u8] = &token.tag.name; - match tag_name { - b"link" | b"head" | b"base" => Some(Default::default()), - _ => None, - } - } - _ => None, - }; - } - - fn push_attribute_name(&mut self, s: &[u8]) { - if let Some(attr) = &mut self.current_attribute { - attr.0.extend(s) - } - } - - fn push_attribute_value(&mut self, s: &[u8]) { - if let Some(attr) = &mut self.current_attribute { - attr.1.extend(s) 
- } - } - - fn current_is_appropriate_end_tag_token(&mut self) -> bool { - match &self.current_token { - Some(token) if token.closing => !self.last_start_tag.is_empty() && self.last_start_tag == token.tag.name, - _ => false, - } - } - - // We do not want and need these parts of the HTML document - // These will be skipped and ignored during the tokenization and iteration. - fn emit_current_comment(&mut self) {} - fn emit_current_doctype(&mut self) {} - fn emit_eof(&mut self) {} - fn emit_error(&mut self, _: html5gum::Error) {} - fn emit_string(&mut self, _: &[u8]) {} - fn init_comment(&mut self) {} - fn init_doctype(&mut self) {} - fn push_comment(&mut self, _: &[u8]) {} - fn push_doctype_name(&mut self, _: &[u8]) {} - fn push_doctype_public_identifier(&mut self, _: &[u8]) {} - fn push_doctype_system_identifier(&mut self, _: &[u8]) {} - fn set_doctype_public_identifier(&mut self, _: &[u8]) {} - fn set_doctype_system_identifier(&mut self, _: &[u8]) {} - fn set_force_quirks(&mut self) {} - fn set_self_closing(&mut self) {} +fn icon_internal(domain: &str) -> Cached<(ContentType, Vec<u8>)> { + Cached::ttl( + ( + ContentType::new("image", "png"), + include_bytes!("../static/images/fallback-icon.png").to_vec(), + ), + CONFIG.icon_cache_negttl(), + true, + ) } diff --git a/src/api/identity.rs b/src/api/identity.rs @@ -10,8 +10,6 @@ use serde_json::Value; use crate::{ api::{ core::accounts::{PreloginData, RegisterData, _prelogin, _register}, - core::log_user_event, - core::two_factor::{duo, email, email::EmailTokenData, yubikey}, ApiResult, EmptyResult, JsonResult, JsonUpcase, }, auth::{generate_organization_api_key_login_claims, ClientHeaders, ClientIp}, @@ -25,7 +23,11 @@ pub fn routes() -> Vec<Route> { } #[post("/connect/token", data = "<data>")] -async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn: DbConn) -> JsonResult { +async fn login( + data: Form<ConnectData>, + client_header: ClientHeaders, + mut conn: DbConn, +) -> JsonResult { let data: ConnectData = data.into_inner(); let mut user_uuid: Option<String> = None; @@ -60,34 +62,6 @@ async fn login(data: Form<ConnectData>, client_header: ClientHeaders, mut conn: } t => err!("Invalid type", t), }; - - if let Some(user_uuid) = user_uuid { - match &login_result { - Ok(_) => { - log_user_event( - EventType::UserLoggedIn as i32, - &user_uuid, - client_header.device_type, - &client_header.ip.ip, - &mut conn, - ) - .await; - } - Err(e) => { - if let Some(ev) = e.get_event() { - log_user_event( - ev.event as i32, - &user_uuid, - client_header.device_type, - &client_header.ip.ip, - &mut conn, - ) - .await - } - } - } - } - login_result } @@ -96,7 +70,9 @@ async fn _refresh_login(data: ConnectData, conn: &mut DbConn) -> JsonResult { let token = data.refresh_token.unwrap(); // Get device by refresh token - let mut device = Device::find_by_refresh_token(&token, conn).await.map_res("Invalid refresh token")?; + let mut device = Device::find_by_refresh_token(&token, conn) + .await + .map_res("Invalid refresh token")?; let scope = "api offline_access"; let scope_vec = vec!["api".into(), "offline_access".into()]; @@ -147,7 +123,10 @@ async fn _password_login( let username = data.username.as_ref().unwrap().trim(); let mut user = match User::find_by_mail(username, conn).await { Some(user) => user, - None => err!("Username or password is incorrect. Try again", format!("IP: {}. Username: {}.", ip.ip, username)), + None => err!( + "Username or password is incorrect. Try again", + format!("IP: {}. 
Username: {}.", ip.ip, username) + ), }; // Set the user_uuid here to be passed back used for event logging. @@ -156,7 +135,9 @@ async fn _password_login( // Check password let password = data.password.as_ref().unwrap(); if let Some(auth_request_uuid) = data.auth_request.clone() { - if let Some(auth_request) = AuthRequest::find_by_uuid(auth_request_uuid.as_str(), conn).await { + if let Some(auth_request) = + AuthRequest::find_by_uuid(auth_request_uuid.as_str(), conn).await + { if !auth_request.check_access_code(password) { err!( "Username or access code is incorrect. Try again", @@ -191,7 +172,7 @@ async fn _password_login( user.set_password(password, None, false, None); if let Err(e) = user.save(conn).await { - error!("Error updating user: {:#?}", e); + panic!("Error updating user: {:#?}", e); } } @@ -210,7 +191,9 @@ async fn _password_login( if user.verified_at.is_none() && CONFIG.mail_enabled() && CONFIG.signups_verify() { if user.last_verifying_at.is_none() - || now.signed_duration_since(user.last_verifying_at.unwrap()).num_seconds() + || now + .signed_duration_since(user.last_verifying_at.unwrap()) + .num_seconds() > CONFIG.signups_verify_resend_time() as i64 { let resend_limit = CONFIG.signups_verify_resend_limit() as i32; @@ -221,11 +204,11 @@ async fn _password_login( user.login_verify_count += 1; if let Err(e) = user.save(conn).await { - error!("Error updating user: {:#?}", e); + panic!("Error updating user: {:#?}", e); } if let Err(e) = mail::send_verify_email(&user.email, &user.uuid).await { - error!("Error auto-sending email verification email: {:#?}", e); + panic!("Error auto-sending email verification email: {:#?}", e); } } } @@ -240,25 +223,8 @@ async fn _password_login( ) } - let (mut device, new_device) = get_device(&data, conn, &user).await; - + let (mut device, _) = get_device(&data, conn, &user).await; let twofactor_token = twofactor_auth(&user.uuid, &data, &mut device, ip, conn).await?; - - if CONFIG.mail_enabled() && new_device { - if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await { - error!("Error sending new device email: {:#?}", e); - - if CONFIG.require_device_email() { - err!( - "Could not send login notification email. Please contact your administrator.", - ErrorEvent { - event: EventType::UserFailedLogIn - } - ) - } - } - } - // Common let orgs = UserOrganization::find_confirmed_by_user(&user.uuid, conn).await; let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec); @@ -358,17 +324,17 @@ async fn _user_api_key_login( if CONFIG.mail_enabled() && new_device { let now = Utc::now().naive_utc(); - if let Err(e) = mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name).await { - error!("Error sending new device email: {:#?}", e); - - if CONFIG.require_device_email() { - err!( - "Could not send login notification email. Please contact your administrator.", - ErrorEvent { - event: EventType::UserFailedLogIn - } - ) - } + if mail::send_new_device_logged_in(&user.email, &ip.ip.to_string(), &now, &device.name) + .await + .is_err() + && CONFIG.require_device_email() + { + err!( + "Could not send login notification email. Please contact your administrator.", + ErrorEvent { + event: EventType::UserFailedLogIn + } + ) } } @@ -378,7 +344,10 @@ async fn _user_api_key_login( let (access_token, expires_in) = device.refresh_tokens(&user, orgs, scope_vec); device.save(conn).await?; - info!("User {} logged in successfully via API key. 
IP: {}", user.email, ip.ip); + info!( + "User {} logged in successfully via API key. IP: {}", + user.email, ip.ip + ); // Note: No refresh_token is returned. The CLI just repeats the // client_credentials login flow when the existing token expires. @@ -401,7 +370,11 @@ async fn _user_api_key_login( Ok(Json(result)) } -async fn _organization_api_key_login(data: ConnectData, conn: &mut DbConn, ip: &ClientIp) -> JsonResult { +async fn _organization_api_key_login( + data: ConnectData, + conn: &mut DbConn, + ip: &ClientIp, +) -> JsonResult { // Get the org via the client_id let client_id = data.client_id.as_ref().unwrap(); let org_uuid = match client_id.strip_prefix("organization.") { @@ -416,7 +389,10 @@ async fn _organization_api_key_login(data: ConnectData, conn: &mut DbConn, ip: & // Check API key. let client_secret = data.client_secret.as_ref().unwrap(); if !org_api_key.check_valid_api_key(client_secret) { - err!("Incorrect client_secret", format!("IP: {}. Organization: {}.", ip.ip, org_api_key.org_uuid)) + err!( + "Incorrect client_secret", + format!("IP: {}. Organization: {}.", ip.ip, org_api_key.org_uuid) + ) } let claim = generate_organization_api_key_login_claims(org_api_key.uuid, org_api_key.org_uuid); @@ -436,7 +412,10 @@ async fn get_device(data: &ConnectData, conn: &mut DbConn, user: &User) -> (Devi // On iOS, device_type sends "iOS", on others it sends a number // When unknown or unable to parse, return 14, which is 'Unknown Browser' let device_type = util::try_parse_string(data.device_type.as_ref()).unwrap_or(14); - let device_id = data.device_identifier.clone().expect("No device id provided"); + let device_id = data + .device_identifier + .clone() + .expect("No device id provided"); let device_name = data.device_name.clone().expect("No device name provided"); let mut new_device = false; @@ -460,58 +439,40 @@ async fn twofactor_auth( conn: &mut DbConn, ) -> ApiResult<Option<String>> { let twofactors = TwoFactor::find_by_user(user_uuid, conn).await; - // No twofactor token if twofactor is disabled if twofactors.is_empty() { return Ok(None); } - TwoFactorIncomplete::mark_incomplete(user_uuid, &device.uuid, &device.name, ip, conn).await?; - let twofactor_ids: Vec<_> = twofactors.iter().map(|tf| tf.atype).collect(); let selected_id = data.two_factor_provider.unwrap_or(twofactor_ids[0]); // If we aren't given a two factor provider, assume the first one - let twofactor_code = match data.two_factor_token { Some(ref code) => code, - None => err_json!(_json_err_twofactor(&twofactor_ids, user_uuid, conn).await?, "2FA token not provided"), + None => err_json!( + _json_err_twofactor(&twofactor_ids, user_uuid, conn).await?, + "2FA token not provided" + ), }; - - let selected_twofactor = twofactors.into_iter().find(|tf| tf.atype == selected_id && tf.enabled); - + let selected_twofactor = twofactors + .into_iter() + .find(|tf| tf.atype == selected_id && tf.enabled); use crate::api::core::two_factor as _tf; - use crate::crypto::ct_eq; - let selected_data = _selected_data(selected_twofactor); - let mut remember = data.two_factor_remember.unwrap_or(0); - + let remember = data.two_factor_remember.unwrap_or(0); match TwoFactorType::from_i32(selected_id) { Some(TwoFactorType::Authenticator) => { - _tf::authenticator::validate_totp_code_str(user_uuid, twofactor_code, &selected_data?, ip, conn).await? + _tf::authenticator::validate_totp_code_str( + user_uuid, + twofactor_code, + &selected_data?, + ip, + conn, + ) + .await? 
} Some(TwoFactorType::Webauthn) => { _tf::webauthn::validate_webauthn_login(user_uuid, twofactor_code, conn).await? } - Some(TwoFactorType::YubiKey) => _tf::yubikey::validate_yubikey_login(twofactor_code, &selected_data?).await?, - Some(TwoFactorType::Duo) => { - _tf::duo::validate_duo_login(data.username.as_ref().unwrap().trim(), twofactor_code, conn).await? - } - Some(TwoFactorType::Email) => { - _tf::email::validate_email_code_str(user_uuid, twofactor_code, &selected_data?, conn).await? - } - - Some(TwoFactorType::Remember) => { - match device.twofactor_remember { - Some(ref code) if !CONFIG.disable_2fa_remember() && ct_eq(code, twofactor_code) => { - remember = 1; // Make sure we also return the token here, otherwise it will only remember the first time - } - _ => { - err_json!( - _json_err_twofactor(&twofactor_ids, user_uuid, conn).await?, - "2FA Remember token not provided" - ) - } - } - } _ => err!( "Invalid two factor provider", ErrorEvent { @@ -534,7 +495,11 @@ fn _selected_data(tf: Option<TwoFactor>) -> ApiResult<String> { tf.map(|t| t.data).map_res("Two factor doesn't exist") } -async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbConn) -> ApiResult<Value> { +async fn _json_err_twofactor( + providers: &[i32], + user_uuid: &str, + conn: &mut DbConn, +) -> ApiResult<Value> { use crate::api::core::two_factor; let mut result = json!({ @@ -551,56 +516,10 @@ async fn _json_err_twofactor(providers: &[i32], user_uuid: &str, conn: &mut DbCo Some(TwoFactorType::Authenticator) => { /* Nothing to do for TOTP */ } Some(TwoFactorType::Webauthn) if CONFIG.domain_set() => { - let request = two_factor::webauthn::generate_webauthn_login(user_uuid, conn).await?; + let request = + two_factor::webauthn::generate_webauthn_login(user_uuid, conn).await?; result["TwoFactorProviders2"][provider.to_string()] = request.0; } - - Some(TwoFactorType::Duo) => { - let email = match User::find_by_uuid(user_uuid, conn).await { - Some(u) => u.email, - None => err!("User does not exist"), - }; - - let (signature, host) = duo::generate_duo_signature(&email, conn).await?; - - result["TwoFactorProviders2"][provider.to_string()] = json!({ - "Host": host, - "Signature": signature, - }); - } - - Some(tf_type @ TwoFactorType::YubiKey) => { - let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await { - Some(tf) => tf, - None => err!("No YubiKey devices registered"), - }; - - let yubikey_metadata: yubikey::YubikeyMetadata = serde_json::from_str(&twofactor.data)?; - - result["TwoFactorProviders2"][provider.to_string()] = json!({ - "Nfc": yubikey_metadata.Nfc, - }) - } - - Some(tf_type @ TwoFactorType::Email) => { - use crate::api::core::two_factor as _tf; - - let twofactor = match TwoFactor::find_by_user_and_type(user_uuid, tf_type as i32, conn).await { - Some(tf) => tf, - None => err!("No twofactor email registered"), - }; - - // Send email immediately if email is the only 2FA option - if providers.len() == 1 { - _tf::email::send_token(user_uuid, conn).await? 
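
// Sketch (illustrative, shape partly assumed): _json_err_twofactor returns a
// body whose "TwoFactorProviders2" object maps each enabled provider id to
// provider-specific hints (a WebAuthn challenge here; Nfc flags, obscured
// e-mail addresses, etc. in the removed arms). Assuming serde_json:
use serde_json::{json, Value};

fn twofactor_hints(providers: &[i32]) -> Value {
    let mut body = json!({ "TwoFactorProviders2": {} });
    for p in providers {
        // Hints are filled in per provider type; json!(null) stands in for
        // the provider-specific payload built by each match arm.
        body["TwoFactorProviders2"][p.to_string()] = json!(null);
    }
    body
}
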
- } - - let email_data = EmailTokenData::from_json(&twofactor.data)?; - result["TwoFactorProviders2"][provider.to_string()] = json!({ - "Email": email::obscure_email(&email_data.email), - }) - } - _ => {} } } diff --git a/src/api/mod.rs b/src/api/mod.rs @@ -3,7 +3,6 @@ pub mod core; mod icons; mod identity; mod notifications; -mod push; mod web; use rocket::serde::json::Json; @@ -13,20 +12,14 @@ pub use crate::api::{ admin::catchers as admin_catchers, admin::routes as admin_routes, core::catchers as core_catchers, - core::purge_auth_requests, - core::purge_sends, - core::purge_trashed_ciphers, + core::events_routes as core_events_routes, core::routes as core_routes, - core::two_factor::send_incomplete_2fa_notifications, - core::{emergency_notification_reminder_job, emergency_request_timeout_job}, - core::{event_cleanup_job, events_routes as core_events_routes}, icons::routes as icons_routes, identity::routes as identity_routes, notifications::routes as notifications_routes, - notifications::{start_notification_server, AnonymousNotify, Notify, UpdateType, WS_ANONYMOUS_SUBSCRIPTIONS}, - push::{ - push_cipher_update, push_folder_update, push_logout, push_send_update, push_user_update, register_push_device, - unregister_push_device, + notifications::{ + init_ws_anonymous_subscriptions, init_ws_users, start_notification_server, + ws_anonymous_subscriptions, AnonymousNotify, Notify, UpdateType, }, web::catchers as web_catchers, web::routes as web_routes, @@ -70,9 +63,9 @@ impl NumberOrString { use std::num::ParseIntError as PIE; match self { NumberOrString::Number(n) => Ok(*n), - NumberOrString::String(s) => { - s.parse().map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string())) - } + NumberOrString::String(s) => s + .parse() + .map_err(|e: PIE| crate::Error::new("Can't convert to number", e.to_string())), } } } diff --git a/src/api/notifications.rs b/src/api/notifications.rs @@ -1,51 +1,55 @@ -use std::{ - net::{IpAddr, SocketAddr}, - sync::Arc, - time::Duration, -}; - -use chrono::{NaiveDateTime, Utc}; -use rmpv::Value; -use rocket::{ - futures::{SinkExt, StreamExt}, - Route, -}; -use tokio::{ - net::{TcpListener, TcpStream}, - sync::mpsc::Sender, -}; -use tokio_tungstenite::{ - accept_hdr_async, - tungstenite::{handshake, Message}, -}; - use crate::{ auth::{ClientIp, WsAccessTokenHeader}, db::{ models::{Cipher, Folder, Send as DbSend, User}, DbConn, }, - Error, CONFIG, -}; - -use once_cell::sync::Lazy; - -static WS_USERS: Lazy<Arc<WebSocketUsers>> = Lazy::new(|| { - Arc::new(WebSocketUsers { - map: Arc::new(dashmap::DashMap::new()), - }) -}); - -pub static WS_ANONYMOUS_SUBSCRIPTIONS: Lazy<Arc<AnonymousWebSocketSubscriptions>> = Lazy::new(|| { - Arc::new(AnonymousWebSocketSubscriptions { - map: Arc::new(dashmap::DashMap::new()), - }) -}); - -use super::{ - push::push_auth_request, push::push_auth_response, push_cipher_update, push_folder_update, push_logout, - push_send_update, push_user_update, + Error, }; +use chrono::{NaiveDateTime, Utc}; +use rmpv::Value; +use rocket::{futures::StreamExt, Route}; +use std::sync::OnceLock; +use std::{sync::Arc, time::Duration}; +use tokio::sync::mpsc::Sender; +use tokio_tungstenite::tungstenite::Message; +static WS_USERS: OnceLock<Arc<WebSocketUsers>> = OnceLock::new(); +#[inline] +pub fn init_ws_users() { + if WS_USERS + .set(Arc::new(WebSocketUsers { + map: Arc::new(dashmap::DashMap::new()), + })) + .is_err() + { + panic!("WS_USERS must be initialized only once") + } +} +#[inline] +fn ws_users() -> &'static Arc<WebSocketUsers> { + 
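
// Sketch (illustrative): this OnceLock pattern replaces the old Lazy global.
// main() must call the init function exactly once before any reader runs; a
// second set(), or a get() before init, is treated as a programming error.
use std::sync::OnceLock;

static REGISTRY: OnceLock<Vec<String>> = OnceLock::new();

fn init_registry() {
    if REGISTRY.set(Vec::new()).is_err() {
        panic!("REGISTRY must be initialized only once")
    }
}

fn registry() -> &'static Vec<String> {
    REGISTRY.get().expect("REGISTRY should be initialized in main")
}
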
WS_USERS + .get() + .expect("WS_USERS should be initialized in main") +} +pub static WS_ANONYMOUS_SUBSCRIPTIONS: OnceLock<Arc<AnonymousWebSocketSubscriptions>> = + OnceLock::new(); +#[inline] +pub fn init_ws_anonymous_subscriptions() { + if WS_ANONYMOUS_SUBSCRIPTIONS + .set(Arc::new(AnonymousWebSocketSubscriptions { + map: Arc::new(dashmap::DashMap::new()), + })) + .is_err() + { + panic!("WS_ANONYMOUS_SUBSCRIPTIONS must only be initialized once") + } +} +#[inline] +pub fn ws_anonymous_subscriptions() -> &'static Arc<AnonymousWebSocketSubscriptions> { + WS_ANONYMOUS_SUBSCRIPTIONS + .get() + .expect("WS_ANONYMOUS_SUBSCRIPTIONS should be initialized in main") +} pub fn routes() -> Vec<Route> { routes![websockets_hub, anonymous_websockets_hub] @@ -60,23 +64,20 @@ struct WSEntryMapGuard { users: Arc<WebSocketUsers>, user_uuid: String, entry_uuid: uuid::Uuid, - addr: IpAddr, } impl WSEntryMapGuard { - fn new(users: Arc<WebSocketUsers>, user_uuid: String, entry_uuid: uuid::Uuid, addr: IpAddr) -> Self { + fn new(users: Arc<WebSocketUsers>, user_uuid: String, entry_uuid: uuid::Uuid) -> Self { Self { users, user_uuid, entry_uuid, - addr, } } } impl Drop for WSEntryMapGuard { fn drop(&mut self) { - info!("Closing WS connection from {}", self.addr); if let Some(mut entry) = self.users.map.get_mut(&self.user_uuid) { entry.retain(|(uuid, _)| uuid != &self.entry_uuid); } @@ -86,22 +87,19 @@ impl Drop for WSEntryMapGuard { struct WSAnonymousEntryMapGuard { subscriptions: Arc<AnonymousWebSocketSubscriptions>, token: String, - addr: IpAddr, } impl WSAnonymousEntryMapGuard { - fn new(subscriptions: Arc<AnonymousWebSocketSubscriptions>, token: String, addr: IpAddr) -> Self { + fn new(subscriptions: Arc<AnonymousWebSocketSubscriptions>, token: String) -> Self { Self { subscriptions, token, - addr, } } } impl Drop for WSAnonymousEntryMapGuard { fn drop(&mut self) { - info!("Closing WS connection from {}", self.addr); self.subscriptions.map.remove(&self.token); } } @@ -110,12 +108,9 @@ impl Drop for WSAnonymousEntryMapGuard { fn websockets_hub<'r>( ws: rocket_ws::WebSocket, data: WsAccessToken, - ip: ClientIp, + _ip: ClientIp, header_token: WsAccessTokenHeader, ) -> Result<rocket_ws::Stream!['r], Error> { - let addr = ip.ip; - info!("Accepting Rocket WS connection from {addr}"); - let token = if let Some(token) = data.access_token { token } else if let Some(token) = header_token.access_token { @@ -129,15 +124,19 @@ fn websockets_hub<'r>( }; let (mut rx, guard) = { - let users = Arc::clone(&WS_USERS); + let users = Arc::clone(ws_users()); // Add a channel to send messages to this client to the map let entry_uuid = uuid::Uuid::new_v4(); let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100); - users.map.entry(claims.sub.clone()).or_default().push((entry_uuid, tx)); + users + .map + .entry(claims.sub.clone()) + .or_default() + .push((entry_uuid, tx)); // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map - (rx, WSEntryMapGuard::new(users, claims.sub, entry_uuid, addr)) + (rx, WSEntryMapGuard::new(users, claims.sub, entry_uuid)) }; Ok({ @@ -190,20 +189,17 @@ fn websockets_hub<'r>( fn anonymous_websockets_hub<'r>( ws: rocket_ws::WebSocket, token: String, - ip: ClientIp, + _ip: ClientIp, ) -> Result<rocket_ws::Stream!['r], Error> { - let addr = ip.ip; - info!("Accepting Anonymous Rocket WS connection from {addr}"); - let (mut rx, guard) = { - let subscriptions = Arc::clone(&WS_ANONYMOUS_SUBSCRIPTIONS); + let subscriptions = 
Arc::clone(ws_anonymous_subscriptions()); // Add a channel to send messages to this client to the map let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100); subscriptions.map.insert(token.clone(), tx); // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map - (rx, WSAnonymousEntryMapGuard::new(subscriptions, token, addr)) + (rx, WSAnonymousEntryMapGuard::new(subscriptions, token)) }; Ok({ @@ -330,9 +326,7 @@ impl WebSocketUsers { async fn send_update(&self, user_uuid: &str, data: &[u8]) { if let Some(user) = self.map.get(user_uuid).map(|v| v.clone()) { for (_, sender) in user.iter() { - if let Err(e) = sender.send(Message::binary(data)).await { - error!("Error sending WS update {e}"); - } + _ = sender.send(Message::binary(data)).await; } } } @@ -340,30 +334,28 @@ impl WebSocketUsers { // NOTE: The last modified date needs to be updated before calling these methods pub async fn send_user_update(&self, ut: UpdateType, user: &User) { let data = create_update( - vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))], + vec![ + ("UserId".into(), user.uuid.clone().into()), + ("Date".into(), serialize_date(user.updated_at)), + ], ut, None, ); self.send_update(&user.uuid, &data).await; - - if CONFIG.push_enabled() { - push_user_update(ut, user); - } } pub async fn send_logout(&self, user: &User, acting_device_uuid: Option<String>) { let data = create_update( - vec![("UserId".into(), user.uuid.clone().into()), ("Date".into(), serialize_date(user.updated_at))], + vec![ + ("UserId".into(), user.uuid.clone().into()), + ("Date".into(), serialize_date(user.updated_at)), + ], UpdateType::LogOut, acting_device_uuid.clone(), ); self.send_update(&user.uuid, &data).await; - - if CONFIG.push_enabled() { - push_logout(user, acting_device_uuid); - } } pub async fn send_folder_update( @@ -371,7 +363,7 @@ impl WebSocketUsers { ut: UpdateType, folder: &Folder, acting_device_uuid: &String, - conn: &mut DbConn, + _: &mut DbConn, ) { let data = create_update( vec![ @@ -384,10 +376,6 @@ impl WebSocketUsers { ); self.send_update(&folder.user_uuid, &data).await; - - if CONFIG.push_enabled() { - push_folder_update(ut, folder, acting_device_uuid, conn).await; - } } pub async fn send_cipher_update( @@ -397,20 +385,30 @@ impl WebSocketUsers { user_uuids: &[String], acting_device_uuid: &String, collection_uuids: Option<Vec<String>>, - conn: &mut DbConn, + _: &mut DbConn, ) { let org_uuid = convert_option(cipher.organization_uuid.clone()); // Depending if there are collections provided or not, we need to have different values for the following variables. // The user_uuid should be `null`, and the revision date should be set to now, else the clients won't sync the collection change. 
-        let (user_uuid, collection_uuids, revision_date) = if let Some(collection_uuids) = collection_uuids {
-            (
-                Value::Nil,
-                Value::Array(collection_uuids.into_iter().map(|v| v.into()).collect::<Vec<rmpv::Value>>()),
-                serialize_date(Utc::now().naive_utc()),
-            )
-        } else {
-            (convert_option(cipher.user_uuid.clone()), Value::Nil, serialize_date(cipher.updated_at))
-        };
+        let (user_uuid, collection_uuids, revision_date) =
+            if let Some(collection_uuids) = collection_uuids {
+                (
+                    Value::Nil,
+                    Value::Array(
+                        collection_uuids
+                            .into_iter()
+                            .map(|v| v.into())
+                            .collect::<Vec<rmpv::Value>>(),
+                    ),
+                    serialize_date(Utc::now().naive_utc()),
+                )
+            } else {
+                (
+                    convert_option(cipher.user_uuid.clone()),
+                    Value::Nil,
+                    serialize_date(cipher.updated_at),
+                )
+            };
 
         let data = create_update(
             vec![
@@ -427,10 +425,6 @@ impl WebSocketUsers {
         for uuid in user_uuids {
             self.send_update(uuid, &data).await;
         }
-
-        if CONFIG.push_enabled() && user_uuids.len() == 1 {
-            push_cipher_update(ut, cipher, acting_device_uuid, conn).await;
-        }
     }
 
     pub async fn send_send_update(
@@ -438,8 +432,8 @@ impl WebSocketUsers {
         ut: UpdateType,
         send: &DbSend,
         user_uuids: &[String],
-        acting_device_uuid: &String,
-        conn: &mut DbConn,
+        _: &String,
+        _: &mut DbConn,
     ) {
         let user_uuid = convert_option(send.user_uuid.clone());
@@ -456,48 +450,42 @@ impl WebSocketUsers {
         for uuid in user_uuids {
             self.send_update(uuid, &data).await;
         }
-        if CONFIG.push_enabled() && user_uuids.len() == 1 {
-            push_send_update(ut, send, acting_device_uuid, conn).await;
-        }
     }
 
     pub async fn send_auth_request(
         &self,
-        user_uuid: &String,
-        auth_request_uuid: &String,
-        acting_device_uuid: &String,
-        conn: &mut DbConn,
+        user_uuid: &str,
+        auth_request_uuid: &str,
+        acting_device_uuid: &str,
+        _: &mut DbConn,
     ) {
         let data = create_update(
-            vec![("Id".into(), auth_request_uuid.clone().into()), ("UserId".into(), user_uuid.clone().into())],
+            vec![
+                ("Id".into(), auth_request_uuid.to_owned().into()),
+                ("UserId".into(), user_uuid.to_owned().into()),
+            ],
             UpdateType::AuthRequest,
             Some(acting_device_uuid.to_string()),
         );
 
         self.send_update(user_uuid, &data).await;
-
-        if CONFIG.push_enabled() {
-            push_auth_request(user_uuid.to_string(), auth_request_uuid.to_string(), conn).await;
-        }
     }
 
     pub async fn send_auth_response(
         &self,
-        user_uuid: &String,
+        user_uuid: &str,
         auth_response_uuid: &str,
         approving_device_uuid: String,
-        conn: &mut DbConn,
+        _: &mut DbConn,
     ) {
         let data = create_update(
-            vec![("Id".into(), auth_response_uuid.to_owned().into()), ("UserId".into(),
user_uuid.clone().into())], + vec![ + ("Id".into(), auth_response_uuid.to_owned().into()), + ("UserId".into(), user_uuid.clone().into()), + ], UpdateType::AuthRequestResponse, user_uuid.to_string(), ); @@ -540,7 +529,11 @@ impl AnonymousWebSocketSubscriptions { ] ] */ -fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uuid: Option<String>) -> Vec<u8> { +fn create_update( + payload: Vec<(Value, Value)>, + ut: UpdateType, + acting_device_uuid: Option<String>, +) -> Vec<u8> { use rmpv::Value as V; let value = V::Array(vec![ @@ -549,7 +542,12 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uui V::Nil, "ReceiveMessage".into(), V::Array(vec![V::Map(vec![ - ("ContextId".into(), acting_device_uuid.map(|v| v.into()).unwrap_or_else(|| V::Nil)), + ( + "ContextId".into(), + acting_device_uuid + .map(|v| v.into()) + .unwrap_or_else(|| V::Nil), + ), ("Type".into(), (ut as i32).into()), ("Payload".into(), payload.into()), ])]), @@ -558,7 +556,11 @@ fn create_update(payload: Vec<(Value, Value)>, ut: UpdateType, acting_device_uui serialize(value) } -fn create_anonymous_update(payload: Vec<(Value, Value)>, ut: UpdateType, user_id: String) -> Vec<u8> { +fn create_anonymous_update( + payload: Vec<(Value, Value)>, + ut: UpdateType, + user_id: String, +) -> Vec<u8> { use rmpv::Value as V; let value = V::Array(vec![ @@ -610,127 +612,6 @@ pub enum UpdateType { pub type Notify<'a> = &'a rocket::State<Arc<WebSocketUsers>>; pub type AnonymousNotify<'a> = &'a rocket::State<Arc<AnonymousWebSocketSubscriptions>>; - pub fn start_notification_server() -> Arc<WebSocketUsers> { - let users = Arc::clone(&WS_USERS); - if CONFIG.websocket_enabled() { - let users2 = Arc::<WebSocketUsers>::clone(&users); - tokio::spawn(async move { - let addr = (CONFIG.websocket_address(), CONFIG.websocket_port()); - info!("Starting WebSockets server on {}:{}", addr.0, addr.1); - let listener = TcpListener::bind(addr).await.expect("Can't listen on websocket port"); - - let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel::<()>(); - CONFIG.set_ws_shutdown_handle(shutdown_tx); - - loop { - tokio::select! 
{ - Ok((stream, addr)) = listener.accept() => { - tokio::spawn(handle_connection(stream, Arc::<WebSocketUsers>::clone(&users2), addr)); - } - - _ = &mut shutdown_rx => { - break; - } - } - } - - info!("Shutting down WebSockets server!") - }); - } - - users -} - -async fn handle_connection(stream: TcpStream, users: Arc<WebSocketUsers>, addr: SocketAddr) -> Result<(), Error> { - let mut user_uuid: Option<String> = None; - - info!("Accepting WS connection from {addr}"); - - // Accept connection, do initial handshake, validate auth token and get the user ID - use handshake::server::{Request, Response}; - let mut stream = accept_hdr_async(stream, |req: &Request, res: Response| { - if let Some(token) = get_request_token(req) { - if let Ok(claims) = crate::auth::decode_login(&token) { - user_uuid = Some(claims.sub); - return Ok(res); - } - } - Err(Response::builder().status(401).body(None).unwrap()) - }) - .await?; - - let user_uuid = user_uuid.expect("User UUID should be set after the handshake"); - - let (mut rx, guard) = { - // Add a channel to send messages to this client to the map - let entry_uuid = uuid::Uuid::new_v4(); - let (tx, rx) = tokio::sync::mpsc::channel::<Message>(100); - users.map.entry(user_uuid.clone()).or_default().push((entry_uuid, tx)); - - // Once the guard goes out of scope, the connection will have been closed and the entry will be deleted from the map - (rx, WSEntryMapGuard::new(users, user_uuid, entry_uuid, addr.ip())) - }; - - let _guard = guard; - let mut interval = tokio::time::interval(Duration::from_secs(15)); - loop { - tokio::select! { - res = stream.next() => { - match res { - Some(Ok(message)) => { - match message { - // Respond to any pings - Message::Ping(ping) => stream.send(Message::Pong(ping)).await?, - Message::Pong(_) => {/* Ignored */}, - - // We should receive an initial message with the protocol and version, and we will reply to it - Message::Text(ref message) => { - let msg = message.strip_suffix(RECORD_SEPARATOR as char).unwrap_or(message); - - if serde_json::from_str(msg).ok() == Some(INITIAL_MESSAGE) { - stream.send(Message::binary(INITIAL_RESPONSE)).await?; - continue; - } - } - // Just echo anything else the client sends - _ => stream.send(message).await?, - } - } - _ => break, - } - } - - res = rx.recv() => { - match res { - Some(res) => stream.send(res).await?, - None => break, - } - } - - _ = interval.tick() => stream.send(Message::Ping(create_ping())).await? 
- } - } - - Ok(()) -} - -fn get_request_token(req: &handshake::server::Request) -> Option<String> { - const ACCESS_TOKEN_KEY: &str = "access_token="; - - if let Some(Ok(auth)) = req.headers().get("Authorization").map(|a| a.to_str()) { - if let Some(token_part) = auth.strip_prefix("Bearer ") { - return Some(token_part.to_owned()); - } - } - - if let Some(params) = req.uri().query() { - let params_iter = params.split('&').take(1); - for val in params_iter { - if let Some(stripped) = val.strip_prefix(ACCESS_TOKEN_KEY) { - return Some(stripped.to_owned()); - } - } - } - None + Arc::clone(ws_users()) } diff --git a/src/api/push.rs b/src/api/push.rs @@ -1,294 +0,0 @@ -use reqwest::header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE}; -use serde_json::Value; -use tokio::sync::RwLock; - -use crate::{ - api::{ApiResult, EmptyResult, UpdateType}, - db::models::{Cipher, Device, Folder, Send, User}, - util::get_reqwest_client, - CONFIG, -}; - -use once_cell::sync::Lazy; -use std::time::{Duration, Instant}; - -#[derive(Deserialize)] -struct AuthPushToken { - access_token: String, - expires_in: i32, -} - -#[derive(Debug)] -struct LocalAuthPushToken { - access_token: String, - valid_until: Instant, -} - -async fn get_auth_push_token() -> ApiResult<String> { - static PUSH_TOKEN: Lazy<RwLock<LocalAuthPushToken>> = Lazy::new(|| { - RwLock::new(LocalAuthPushToken { - access_token: String::new(), - valid_until: Instant::now(), - }) - }); - let push_token = PUSH_TOKEN.read().await; - - if push_token.valid_until.saturating_duration_since(Instant::now()).as_secs() > 0 { - debug!("Auth Push token still valid, no need for a new one"); - return Ok(push_token.access_token.clone()); - } - drop(push_token); // Drop the read lock now - - let installation_id = CONFIG.push_installation_id(); - let client_id = format!("installation.{installation_id}"); - let client_secret = CONFIG.push_installation_key(); - - let params = [ - ("grant_type", "client_credentials"), - ("scope", "api.push"), - ("client_id", &client_id), - ("client_secret", &client_secret), - ]; - - let res = match get_reqwest_client().post("https://identity.bitwarden.com/connect/token").form(&params).send().await - { - Ok(r) => r, - Err(e) => err!(format!("Error getting push token from bitwarden server: {e}")), - }; - - let json_pushtoken = match res.json::<AuthPushToken>().await { - Ok(r) => r, - Err(e) => err!(format!("Unexpected push token received from bitwarden server: {e}")), - }; - - let mut push_token = PUSH_TOKEN.write().await; - push_token.valid_until = Instant::now() - .checked_add(Duration::new((json_pushtoken.expires_in / 2) as u64, 0)) // Token valid for half the specified time - .unwrap(); - - push_token.access_token = json_pushtoken.access_token; - - debug!("Token still valid for {}", push_token.valid_until.saturating_duration_since(Instant::now()).as_secs()); - Ok(push_token.access_token.clone()) -} - -pub async fn register_push_device(user_uuid: String, device: Device) -> EmptyResult { - if !CONFIG.push_enabled() { - return Ok(()); - } - let auth_push_token = get_auth_push_token().await?; - - //Needed to register a device for push to bitwarden : - let data = json!({ - "userId": user_uuid, - "deviceId": device.push_uuid, - "identifier": device.uuid, - "type": device.atype, - "pushToken": device.push_token - }); - - let auth_header = format!("Bearer {}", &auth_push_token); - - get_reqwest_client() - .post(CONFIG.push_relay_uri() + "/push/register") - .header(CONTENT_TYPE, "application/json") - .header(ACCEPT, "application/json") - 
.header(AUTHORIZATION, auth_header) - .json(&data) - .send() - .await? - .error_for_status()?; - Ok(()) -} - -pub async fn unregister_push_device(uuid: String) -> EmptyResult { - if !CONFIG.push_enabled() { - return Ok(()); - } - let auth_push_token = get_auth_push_token().await?; - - let auth_header = format!("Bearer {}", &auth_push_token); - - match get_reqwest_client() - .delete(CONFIG.push_relay_uri() + "/push/" + &uuid) - .header(AUTHORIZATION, auth_header) - .send() - .await - { - Ok(r) => r, - Err(e) => err!(format!("An error occurred during device unregistration: {e}")), - }; - Ok(()) -} - -pub async fn push_cipher_update( - ut: UpdateType, - cipher: &Cipher, - acting_device_uuid: &String, - conn: &mut crate::db::DbConn, -) { - // We shouldn't send a push notification on cipher update if the cipher belongs to an organization, this isn't implemented in the upstream server too. - if cipher.organization_uuid.is_some() { - return; - }; - let user_uuid = match &cipher.user_uuid { - Some(c) => c, - None => { - debug!("Cipher has no uuid"); - return; - } - }; - - if Device::check_user_has_push_device(user_uuid, conn).await { - send_to_push_relay(json!({ - "userId": user_uuid, - "organizationId": (), - "deviceId": acting_device_uuid, - "identifier": acting_device_uuid, - "type": ut as i32, - "payload": { - "id": cipher.uuid, - "userId": cipher.user_uuid, - "organizationId": (), - "revisionDate": cipher.updated_at - } - })) - .await; - } -} - -pub fn push_logout(user: &User, acting_device_uuid: Option<String>) { - let acting_device_uuid: Value = acting_device_uuid.map(|v| v.into()).unwrap_or_else(|| Value::Null); - - tokio::task::spawn(send_to_push_relay(json!({ - "userId": user.uuid, - "organizationId": (), - "deviceId": acting_device_uuid, - "identifier": acting_device_uuid, - "type": UpdateType::LogOut as i32, - "payload": { - "userId": user.uuid, - "date": user.updated_at - } - }))); -} - -pub fn push_user_update(ut: UpdateType, user: &User) { - tokio::task::spawn(send_to_push_relay(json!({ - "userId": user.uuid, - "organizationId": (), - "deviceId": (), - "identifier": (), - "type": ut as i32, - "payload": { - "userId": user.uuid, - "date": user.updated_at - } - }))); -} - -pub async fn push_folder_update( - ut: UpdateType, - folder: &Folder, - acting_device_uuid: &String, - conn: &mut crate::db::DbConn, -) { - if Device::check_user_has_push_device(&folder.user_uuid, conn).await { - tokio::task::spawn(send_to_push_relay(json!({ - "userId": folder.user_uuid, - "organizationId": (), - "deviceId": acting_device_uuid, - "identifier": acting_device_uuid, - "type": ut as i32, - "payload": { - "id": folder.uuid, - "userId": folder.user_uuid, - "revisionDate": folder.updated_at - } - }))); - } -} - -pub async fn push_send_update(ut: UpdateType, send: &Send, acting_device_uuid: &String, conn: &mut crate::db::DbConn) { - if let Some(s) = &send.user_uuid { - if Device::check_user_has_push_device(s, conn).await { - tokio::task::spawn(send_to_push_relay(json!({ - "userId": send.user_uuid, - "organizationId": (), - "deviceId": acting_device_uuid, - "identifier": acting_device_uuid, - "type": ut as i32, - "payload": { - "id": send.uuid, - "userId": send.user_uuid, - "revisionDate": send.revision_date - } - }))); - } - } -} - -async fn send_to_push_relay(notification_data: Value) { - if !CONFIG.push_enabled() { - return; - } - - let auth_push_token = match get_auth_push_token().await { - Ok(s) => s, - Err(e) => { - debug!("Could not get the auth push token: {}", e); - return; - } - }; - - let 
auth_header = format!("Bearer {}", &auth_push_token); - - if let Err(e) = get_reqwest_client() - .post(CONFIG.push_relay_uri() + "/push/send") - .header(ACCEPT, "application/json") - .header(CONTENT_TYPE, "application/json") - .header(AUTHORIZATION, &auth_header) - .json(&notification_data) - .send() - .await - { - error!("An error occurred while sending a send update to the push relay: {}", e); - }; -} - -pub async fn push_auth_request(user_uuid: String, auth_request_uuid: String, conn: &mut crate::db::DbConn) { - if Device::check_user_has_push_device(user_uuid.as_str(), conn).await { - tokio::task::spawn(send_to_push_relay(json!({ - "userId": user_uuid, - "organizationId": (), - "deviceId": null, - "identifier": null, - "type": UpdateType::AuthRequest as i32, - "payload": { - "id": auth_request_uuid, - "userId": user_uuid, - } - }))); - } -} - -pub async fn push_auth_response( - user_uuid: String, - auth_request_uuid: String, - approving_device_uuid: String, - conn: &mut crate::db::DbConn, -) { - if Device::check_user_has_push_device(user_uuid.as_str(), conn).await { - tokio::task::spawn(send_to_push_relay(json!({ - "userId": user_uuid, - "organizationId": (), - "deviceId": approving_device_uuid, - "identifier": approving_device_uuid, - "type": UpdateType::AuthRequestResponse as i32, - "payload": { - "id": auth_request_uuid, - "userId": user_uuid, - } - }))); - } -} diff --git a/src/api/web.rs b/src/api/web.rs @@ -1,6 +1,9 @@ use std::path::{Path, PathBuf}; -use rocket::{fs::NamedFile, http::ContentType, response::content::RawHtml as Html, serde::json::Json, Catcher, Route}; +use rocket::{ + fs::NamedFile, http::ContentType, response::content::RawHtml as Html, serde::json::Json, + Catcher, Route, +}; use serde_json::Value; use crate::{ @@ -18,12 +21,6 @@ pub fn routes() -> Vec<Route> { if CONFIG.web_vault_enabled() { routes.append(&mut routes![web_index, web_index_head, app_id, web_files]); } - - #[cfg(debug_assertions)] - if CONFIG.reload_templates() { - routes.append(&mut routes![_static_files_dev]); - } - routes } @@ -47,7 +44,12 @@ fn not_found() -> ApiResult<Html<String>> { #[get("/")] async fn web_index() -> Cached<Option<NamedFile>> { - Cached::short(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")).await.ok(), false) + Cached::short( + NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join("index.html")) + .await + .ok(), + false, + ) } #[head("/")] @@ -95,7 +97,12 @@ fn app_id() -> Cached<(ContentType, Json<Value>)> { #[get("/<p..>", rank = 10)] // Only match this if the other routes don't match async fn web_files(p: PathBuf) -> Cached<Option<NamedFile>> { - Cached::long(NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)).await.ok(), true) + Cached::long( + NamedFile::open(Path::new(&CONFIG.web_vault_folder()).join(p)) + .await + .ok(), + true, + ) } #[get("/attachments/<uuid>/<file_id>?<token>")] @@ -107,7 +114,13 @@ async fn attachments(uuid: SafeString, file_id: SafeString, token: String) -> Op return None; } - NamedFile::open(Path::new(&CONFIG.attachments_folder()).join(uuid).join(file_id)).await.ok() + NamedFile::open( + Path::new(&CONFIG.attachments_folder()) + .join(uuid) + .join(file_id), + ) + .await + .ok() } // We use DbConn here to let the alive healthcheck also verify the database connection. @@ -123,59 +136,86 @@ fn alive_head(_conn: DbConn) -> EmptyResult { // due to <https://github.com/SergioBenitez/Rocket/issues/1098>. Ok(()) } - -// This endpoint/function is used during development and development only. 
-// It allows to easily develop the admin interface by always loading the files from disk instead from a slice of bytes -// This will only be active during a debug build and only when `RELOAD_TEMPLATES` is set to `true` -// NOTE: Do not forget to add any new files added to the `static_files` function below! -#[cfg(debug_assertions)] -#[get("/vw_static/<filename>", rank = 1)] -pub async fn _static_files_dev(filename: PathBuf) -> Option<NamedFile> { - warn!("LOADING STATIC FILES FROM DISK"); - let file = filename.to_str().unwrap_or_default(); - let ext = filename.extension().unwrap_or_default(); - - let path = if ext == "png" || ext == "svg" { - tokio::fs::canonicalize(Path::new(file!()).parent().unwrap().join("../static/images/").join(file)).await - } else { - tokio::fs::canonicalize(Path::new(file!()).parent().unwrap().join("../static/scripts/").join(file)).await - }; - - if let Ok(path) = path { - return NamedFile::open(path).await.ok(); - }; - None -} - #[get("/vw_static/<filename>", rank = 2)] pub fn static_files(filename: &str) -> Result<(ContentType, &'static [u8]), Error> { match filename { "404.png" => Ok((ContentType::PNG, include_bytes!("../static/images/404.png"))), - "mail-github.png" => Ok((ContentType::PNG, include_bytes!("../static/images/mail-github.png"))), - "logo-gray.png" => Ok((ContentType::PNG, include_bytes!("../static/images/logo-gray.png"))), - "error-x.svg" => Ok((ContentType::SVG, include_bytes!("../static/images/error-x.svg"))), - "hibp.png" => Ok((ContentType::PNG, include_bytes!("../static/images/hibp.png"))), - "vaultwarden-icon.png" => Ok((ContentType::PNG, include_bytes!("../static/images/vaultwarden-icon.png"))), - "vaultwarden-favicon.png" => Ok((ContentType::PNG, include_bytes!("../static/images/vaultwarden-favicon.png"))), - "404.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/404.css"))), - "admin.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/admin.css"))), - "admin.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin.js"))), - "admin_settings.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin_settings.js"))), - "admin_users.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin_users.js"))), - "admin_organizations.js" => { - Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin_organizations.js"))) - } - "admin_diagnostics.js" => { - Ok((ContentType::JavaScript, include_bytes!("../static/scripts/admin_diagnostics.js"))) - } - "bootstrap.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/bootstrap.css"))), - "bootstrap.bundle.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/bootstrap.bundle.js"))), - "jdenticon.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jdenticon.js"))), - "datatables.js" => Ok((ContentType::JavaScript, include_bytes!("../static/scripts/datatables.js"))), - "datatables.css" => Ok((ContentType::CSS, include_bytes!("../static/scripts/datatables.css"))), - "jquery-3.7.0.slim.js" => { - Ok((ContentType::JavaScript, include_bytes!("../static/scripts/jquery-3.7.0.slim.js"))) - } + "mail-github.png" => Ok(( + ContentType::PNG, + include_bytes!("../static/images/mail-github.png"), + )), + "logo-gray.png" => Ok(( + ContentType::PNG, + include_bytes!("../static/images/logo-gray.png"), + )), + "error-x.svg" => Ok(( + ContentType::SVG, + include_bytes!("../static/images/error-x.svg"), + )), + "hibp.png" => Ok(( + ContentType::PNG, + 
include_bytes!("../static/images/hibp.png"), + )), + "vaultwarden-icon.png" => Ok(( + ContentType::PNG, + include_bytes!("../static/images/vaultwarden-icon.png"), + )), + "vaultwarden-favicon.png" => Ok(( + ContentType::PNG, + include_bytes!("../static/images/vaultwarden-favicon.png"), + )), + "404.css" => Ok(( + ContentType::CSS, + include_bytes!("../static/scripts/404.css"), + )), + "admin.css" => Ok(( + ContentType::CSS, + include_bytes!("../static/scripts/admin.css"), + )), + "admin.js" => Ok(( + ContentType::JavaScript, + include_bytes!("../static/scripts/admin.js"), + )), + "admin_settings.js" => Ok(( + ContentType::JavaScript, + include_bytes!("../static/scripts/admin_settings.js"), + )), + "admin_users.js" => Ok(( + ContentType::JavaScript, + include_bytes!("../static/scripts/admin_users.js"), + )), + "admin_organizations.js" => Ok(( + ContentType::JavaScript, + include_bytes!("../static/scripts/admin_organizations.js"), + )), + "admin_diagnostics.js" => Ok(( + ContentType::JavaScript, + include_bytes!("../static/scripts/admin_diagnostics.js"), + )), + "bootstrap.css" => Ok(( + ContentType::CSS, + include_bytes!("../static/scripts/bootstrap.css"), + )), + "bootstrap.bundle.js" => Ok(( + ContentType::JavaScript, + include_bytes!("../static/scripts/bootstrap.bundle.js"), + )), + "jdenticon.js" => Ok(( + ContentType::JavaScript, + include_bytes!("../static/scripts/jdenticon.js"), + )), + "datatables.js" => Ok(( + ContentType::JavaScript, + include_bytes!("../static/scripts/datatables.js"), + )), + "datatables.css" => Ok(( + ContentType::CSS, + include_bytes!("../static/scripts/datatables.css"), + )), + "jquery-3.7.0.slim.js" => Ok(( + ContentType::JavaScript, + include_bytes!("../static/scripts/jquery-3.7.0.slim.js"), + )), _ => err!(format!("Static file not found: {filename}")), } } diff --git a/src/auth.rs b/src/auth.rs @@ -15,25 +15,31 @@ const JWT_ALGORITHM: Algorithm = Algorithm::RS256; pub static DEFAULT_VALIDITY: Lazy<Duration> = Lazy::new(|| Duration::hours(2)); static JWT_HEADER: Lazy<Header> = Lazy::new(|| Header::new(JWT_ALGORITHM)); -pub static JWT_LOGIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|login", CONFIG.domain_origin())); +pub static JWT_LOGIN_ISSUER: Lazy<String> = + Lazy::new(|| format!("{}|login", CONFIG.domain_origin())); static JWT_INVITE_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|invite", CONFIG.domain_origin())); static JWT_EMERGENCY_ACCESS_INVITE_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|emergencyaccessinvite", CONFIG.domain_origin())); static JWT_DELETE_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|delete", CONFIG.domain_origin())); -static JWT_VERIFYEMAIL_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|verifyemail", CONFIG.domain_origin())); -static JWT_ADMIN_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|admin", CONFIG.domain_origin())); +static JWT_VERIFYEMAIL_ISSUER: Lazy<String> = + Lazy::new(|| format!("{}|verifyemail", CONFIG.domain_origin())); static JWT_SEND_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|send", CONFIG.domain_origin())); -static JWT_ORG_API_KEY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|api.organization", CONFIG.domain_origin())); -static JWT_FILE_DOWNLOAD_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|file_download", CONFIG.domain_origin())); +static JWT_ORG_API_KEY_ISSUER: Lazy<String> = + Lazy::new(|| format!("{}|api.organization", CONFIG.domain_origin())); +static JWT_FILE_DOWNLOAD_ISSUER: Lazy<String> = + Lazy::new(|| format!("{}|file_download", CONFIG.domain_origin())); static PRIVATE_RSA_KEY: 
Lazy<EncodingKey> = Lazy::new(|| { - let key = - std::fs::read(CONFIG.private_rsa_key()).unwrap_or_else(|e| panic!("Error loading private RSA Key. \n{e}")); - EncodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{e}")) + let key = std::fs::read(CONFIG.private_rsa_key()) + .unwrap_or_else(|e| panic!("Error loading private RSA Key. \n{e}")); + EncodingKey::from_rsa_pem(&key) + .unwrap_or_else(|e| panic!("Error decoding private RSA Key.\n{e}")) }); static PUBLIC_RSA_KEY: Lazy<DecodingKey> = Lazy::new(|| { - let key = std::fs::read(CONFIG.public_rsa_key()).unwrap_or_else(|e| panic!("Error loading public RSA Key. \n{e}")); - DecodingKey::from_rsa_pem(&key).unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{e}")) + let key = std::fs::read(CONFIG.public_rsa_key()) + .unwrap_or_else(|e| panic!("Error loading public RSA Key. \n{e}")); + DecodingKey::from_rsa_pem(&key) + .unwrap_or_else(|e| panic!("Error decoding public RSA Key.\n{e}")) }); pub fn load_keys() { @@ -75,7 +81,9 @@ pub fn decode_invite(token: &str) -> Result<InviteJwtClaims, Error> { decode_jwt(token, JWT_INVITE_ISSUER.to_string()) } -pub fn decode_emergency_access_invite(token: &str) -> Result<EmergencyAccessInviteJwtClaims, Error> { +pub fn decode_emergency_access_invite( + token: &str, +) -> Result<EmergencyAccessInviteJwtClaims, Error> { decode_jwt(token, JWT_EMERGENCY_ACCESS_INVITE_ISSUER.to_string()) } @@ -87,10 +95,6 @@ pub fn decode_verify_email(token: &str) -> Result<BasicJwtClaims, Error> { decode_jwt(token, JWT_VERIFYEMAIL_ISSUER.to_string()) } -pub fn decode_admin(token: &str) -> Result<BasicJwtClaims, Error> { - decode_jwt(token, JWT_ADMIN_ISSUER.to_string()) -} - pub fn decode_send(token: &str) -> Result<BasicJwtClaims, Error> { decode_jwt(token, JWT_SEND_ISSUER.to_string()) } @@ -226,7 +230,10 @@ pub struct OrgApiKeyLoginJwtClaims { pub scope: Vec<String>, } -pub fn generate_organization_api_key_login_claims(uuid: String, org_id: String) -> OrgApiKeyLoginJwtClaims { +pub fn generate_organization_api_key_login_claims( + uuid: String, + org_id: String, +) -> OrgApiKeyLoginJwtClaims { let time_now = Utc::now().naive_utc(); OrgApiKeyLoginJwtClaims { nbf: time_now.timestamp(), @@ -298,16 +305,6 @@ pub fn generate_verify_email_claims(uuid: String) -> BasicJwtClaims { } } -pub fn generate_admin_claims() -> BasicJwtClaims { - let time_now = Utc::now().naive_utc(); - BasicJwtClaims { - nbf: time_now.timestamp(), - exp: (time_now + Duration::minutes(CONFIG.admin_session_lifetime())).timestamp(), - iss: JWT_ADMIN_ISSUER.to_string(), - sub: "admin_panel".to_string(), - } -} - pub fn generate_send_claims(send_id: &str, file_id: &str) -> BasicJwtClaims { let time_now = Utc::now().naive_utc(); BasicJwtClaims { @@ -327,7 +324,9 @@ use rocket::{ }; use crate::db::{ - models::{Collection, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException}, + models::{ + Collection, Device, User, UserOrgStatus, UserOrgType, UserOrganization, UserStampException, + }, DbConn, }; @@ -370,9 +369,7 @@ impl<'r> FromRequest<'r> for Host { format!("{protocol}://{host}") }; - Outcome::Success(Host { - host, - }) + Outcome::Success(Host { host }) } } @@ -393,8 +390,11 @@ impl<'r> FromRequest<'r> for ClientHeaders { _ => err_handler!("Error getting Client IP"), }; // When unknown or unable to parse, return 14, which is 'Unknown Browser' - let device_type: i32 = - request.headers().get_one("device-type").map(|d| d.parse().unwrap_or(14)).unwrap_or_else(|| 14); + let device_type: i32 = request 
+ .headers() + .get_one("device-type") + .map(|d| d.parse().unwrap_or(14)) + .unwrap_or_else(|| 14); Outcome::Success(ClientHeaders { host, @@ -447,7 +447,8 @@ impl<'r> FromRequest<'r> for Headers { _ => err_handler!("Error getting DB"), }; - let device = match Device::find_by_uuid_and_user(&device_uuid, &user_uuid, &mut conn).await { + let device = match Device::find_by_uuid_and_user(&device_uuid, &user_uuid, &mut conn).await + { Some(device) => device, None => err_handler!("Invalid device id"), }; @@ -458,8 +459,10 @@ impl<'r> FromRequest<'r> for Headers { }; if user.security_stamp != claims.sstamp { - if let Some(stamp_exception) = - user.stamp_exception.as_deref().and_then(|s| serde_json::from_str::<UserStampException>(s).ok()) + if let Some(stamp_exception) = user + .stamp_exception + .as_deref() + .and_then(|s| serde_json::from_str::<UserStampException>(s).ok()) { let current_route = match request.route().and_then(|r| r.name.as_deref()) { Some(name) => name, @@ -474,12 +477,12 @@ impl<'r> FromRequest<'r> for Headers { // This prevents checking this stamp exception for new requests. let mut user = user; user.reset_stamp_exception(); - if let Err(e) = user.save(&mut conn).await { - error!("Error updating user: {:#?}", e); - } + _ = user.save(&mut conn).await; err_handler!("Stamp exception is expired") } else if !stamp_exception.routes.contains(&current_route.to_string()) { - err_handler!("Invalid security stamp: Current route and exception route do not match") + err_handler!( + "Invalid security stamp: Current route and exception route do not match" + ) } else if stamp_exception.security_stamp != claims.sstamp { err_handler!("Invalid security stamp for matched stamp exception") } @@ -542,16 +545,21 @@ impl<'r> FromRequest<'r> for OrgHeaders { }; let user = headers.user; - let org_user = match UserOrganization::find_by_user_and_org(&user.uuid, org_id, &mut conn).await { - Some(user) => { - if user.status == UserOrgStatus::Confirmed as i32 { - user - } else { - err_handler!("The current user isn't confirmed member of the organization") + let org_user = + match UserOrganization::find_by_user_and_org(&user.uuid, org_id, &mut conn) + .await + { + Some(user) => { + if user.status == UserOrgStatus::Confirmed as i32 { + user + } else { + err_handler!( + "The current user isn't confirmed member of the organization" + ) + } } - } - None => err_handler!("The current user isn't member of the organization"), - }; + None => err_handler!("The current user isn't member of the organization"), + }; Outcome::Success(Self { host: headers.host, @@ -590,7 +598,10 @@ impl<'r> FromRequest<'r> for AdminHeaders { async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> { let headers = try_outcome!(OrgHeaders::from_request(request).await); - let client_version = request.headers().get_one("Bitwarden-Client-Version").map(String::from); + let client_version = request + .headers() + .get_one("Bitwarden-Client-Version") + .map(String::from); if headers.org_user_type >= UserOrgType::Admin { Outcome::Success(Self { host: headers.host, @@ -734,9 +745,14 @@ impl From<ManagerHeadersLoose> for Headers { } } } -async fn can_access_collection(org_user: &UserOrganization, col_id: &str, conn: &mut DbConn) -> bool { +async fn can_access_collection( + org_user: &UserOrganization, + col_id: &str, + conn: &mut DbConn, +) -> bool { org_user.has_full_access() - || Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn).await + || 
Collection::has_access_by_collection_and_user_uuid(col_id, &org_user.user_uuid, conn) + .await } impl ManagerHeaders { @@ -818,11 +834,11 @@ impl<'r> FromRequest<'r> for ClientIp { None }; - let ip = ip.or_else(|| req.remote().map(|r| r.ip())).unwrap_or_else(|| "0.0.0.0".parse().unwrap()); + let ip = ip + .or_else(|| req.remote().map(|r| r.ip())) + .unwrap_or_else(|| "0.0.0.0".parse().unwrap()); - Outcome::Success(ClientIp { - ip, - }) + Outcome::Success(ClientIp { ip }) } } @@ -843,8 +859,6 @@ impl<'r> FromRequest<'r> for WsAccessTokenHeader { None => None, }; - Outcome::Success(Self { - access_token, - }) + Outcome::Success(Self { access_token }) } } diff --git a/src/config.rs b/src/config.rs @@ -1,10 +1,7 @@ +use once_cell::sync::Lazy; use std::env::consts::EXE_SUFFIX; -use std::process::exit; use std::sync::RwLock; - -use job_scheduler_ng::Schedule; -use once_cell::sync::Lazy; -use reqwest::Url; +use url::Url; use crate::{ db::DbConnType, @@ -12,18 +9,7 @@ use crate::{ util::{get_env, get_env_bool}, }; -static CONFIG_FILE: Lazy<String> = Lazy::new(|| { - let data_folder = get_env("DATA_FOLDER").unwrap_or_else(|| String::from("data")); - get_env("CONFIG_FILE").unwrap_or_else(|| format!("{data_folder}/config.json")) -}); - -pub static CONFIG: Lazy<Config> = Lazy::new(|| { - Config::load().unwrap_or_else(|e| { - println!("Error loading config:\n {e:?}\n"); - exit(12) - }) -}); - +pub static CONFIG: Lazy<Config> = Lazy::new(|| Config::load().expect("unable to load '.env'")); pub type Pass = String; macro_rules! make_config { @@ -40,14 +26,9 @@ macro_rules! make_config { struct Inner { rocket_shutdown_handle: Option<rocket::Shutdown>, ws_shutdown_handle: Option<tokio::sync::oneshot::Sender<()>>, - templates: Handlebars<'static>, config: ConfigItems, - _env: ConfigBuilder, - _usr: ConfigBuilder, - - _overrides: Vec<String>, } #[derive(Clone, Default, Deserialize, Serialize)] @@ -61,41 +42,9 @@ macro_rules! make_config { impl ConfigBuilder { #[allow(clippy::field_reassign_with_default)] fn from_env() -> Self { - let env_file = get_env("ENV_FILE").unwrap_or_else(|| String::from(".env")); - match dotenvy::from_path(&env_file) { - Ok(_) => { - println!("[INFO] Using environment file `{env_file}` for configuration.\n"); - }, - Err(e) => match e { - dotenvy::Error::LineParse(msg, pos) => { - println!("[ERROR] Failed parsing environment file: `{env_file}`\nNear {msg:?} on position {pos}\nPlease fix and restart!\n"); - exit(255); - }, - dotenvy::Error::Io(ioerr) => match ioerr.kind() { - std::io::ErrorKind::NotFound => { - // Only exit if this environment variable is set, but the file was not found. - // This prevents incorrectly configured environments. - if let Some(env_file) = get_env::<String>("ENV_FILE") { - println!("[ERROR] The configured ENV_FILE `{env_file}` was not found!\n"); - exit(255); - } - }, - std::io::ErrorKind::PermissionDenied => { - println!("[ERROR] Permission denied while trying to read environment file `{env_file}`!\n"); - exit(255); - }, - _ => { - println!("[ERROR] Reading environment file `{env_file}` failed:\n{ioerr:?}\n"); - exit(255); - } - }, - _ => { - println!("[ERROR] Reading environment file `{env_file}` failed:\n{e:?}\n"); - exit(255); - } - } - }; - + if dotenvy::from_path(".env").is_err() { + panic!("'.env' does not exist") + } let mut builder = ConfigBuilder::default(); $($( builder.$name = make_config! { @getenv paste::paste!(stringify!([<$name:upper>])), $ty }; @@ -103,37 +52,6 @@ macro_rules! 
make_config { builder } - - fn from_file(path: &str) -> Result<Self, Error> { - let config_str = std::fs::read_to_string(path)?; - println!("[INFO] Using saved config from `{path}` for configuration.\n"); - serde_json::from_str(&config_str).map_err(Into::into) - } - - /// Merges the values of both builders into a new builder. - /// If both have the same element, `other` wins. - fn merge(&self, other: &Self, show_overrides: bool, overrides: &mut Vec<String>) -> Self { - let mut builder = self.clone(); - $($( - if let v @Some(_) = &other.$name { - builder.$name = v.clone(); - - if self.$name.is_some() { - overrides.push(paste::paste!(stringify!([<$name:upper>])).into()); - } - } - )+)+ - - if show_overrides && !overrides.is_empty() { - // We can't use warn! here because logging isn't setup yet. - println!("[WARNING] The following environment variables are being overridden by the config.json file."); - println!("[WARNING] Please use the admin panel to make changes to them:"); - println!("[WARNING] {}\n", overrides.join(", ")); - } - - builder - } - fn build(&self) -> ConfigItems { let mut config = ConfigItems::default(); let _domain_set = self.domain.is_some(); @@ -150,10 +68,8 @@ macro_rules! make_config { config } } - #[derive(Clone, Default)] struct ConfigItems { $($( $name: make_config!{@type $ty, $none_action}, )+)+ } - #[allow(unused)] impl Config { $($( @@ -162,130 +78,8 @@ macro_rules! make_config { self.inner.read().unwrap().config.$name.clone() } )+)+ - - pub fn prepare_json(&self) -> serde_json::Value { - let (def, cfg, overridden) = { - let inner = &self.inner.read().unwrap(); - (inner._env.build(), inner.config.clone(), inner._overrides.clone()) - }; - - fn _get_form_type(rust_type: &str) -> &'static str { - match rust_type { - "Pass" => "password", - "String" => "text", - "bool" => "checkbox", - _ => "number" - } - } - - fn _get_doc(doc: &str) -> serde_json::Value { - let mut split = doc.split("|>").map(str::trim); - - // We do not use the json!() macro here since that causes a lot of macro recursion. - // This slows down compile time and it also causes issues with rust-analyzer - serde_json::Value::Object({ - let mut doc_json = serde_json::Map::new(); - doc_json.insert("name".into(), serde_json::to_value(split.next()).unwrap()); - doc_json.insert("description".into(), serde_json::to_value(split.next()).unwrap()); - doc_json - }) - } - - // We do not use the json!() macro here since that causes a lot of macro recursion. - // This slows down compile time and it also causes issues with rust-analyzer - serde_json::Value::Array(<[_]>::into_vec(Box::new([ - $( - serde_json::Value::Object({ - let mut group = serde_json::Map::new(); - group.insert("group".into(), (stringify!($group)).into()); - group.insert("grouptoggle".into(), (stringify!($($group_enabled)?)).into()); - group.insert("groupdoc".into(), (make_config!{ @show $($groupdoc)? 
}).into()); - - group.insert("elements".into(), serde_json::Value::Array(<[_]>::into_vec(Box::new([ - $( - serde_json::Value::Object({ - let mut element = serde_json::Map::new(); - element.insert("editable".into(), ($editable).into()); - element.insert("name".into(), (stringify!($name)).into()); - element.insert("value".into(), serde_json::to_value(cfg.$name).unwrap()); - element.insert("default".into(), serde_json::to_value(def.$name).unwrap()); - element.insert("type".into(), (_get_form_type(stringify!($ty))).into()); - element.insert("doc".into(), (_get_doc(concat!($($doc),+))).into()); - element.insert("overridden".into(), (overridden.contains(&paste::paste!(stringify!([<$name:upper>])).into())).into()); - element - }), - )+ - ])))); - group - }), - )+ - ]))) - } - - pub fn get_support_json(&self) -> serde_json::Value { - // Define which config keys need to be masked. - // Pass types will always be masked and no need to put them in the list. - // Besides Pass, only String types will be masked via _privacy_mask. - const PRIVACY_CONFIG: &[&str] = &[ - "allowed_iframe_ancestors", - "database_url", - "domain_origin", - "domain_path", - "domain", - "helo_name", - "org_creation_users", - "signups_domains_whitelist", - "smtp_from", - "smtp_host", - "smtp_username", - ]; - - let cfg = { - let inner = &self.inner.read().unwrap(); - inner.config.clone() - }; - - /// We map over the string and remove all alphanumeric, _ and - characters. - /// This is the fastest way (within micro-seconds) instead of using a regex (which takes mili-seconds) - fn _privacy_mask(value: &str) -> String { - let mut n: u16 = 0; - let mut colon_match = false; - value - .chars() - .map(|c| { - n += 1; - match c { - ':' if n <= 11 => { - colon_match = true; - c - } - '/' if n <= 13 && colon_match => c, - ',' => c, - _ => '*', - } - }) - .collect::<String>() - } - - serde_json::Value::Object({ - let mut json = serde_json::Map::new(); - $($( - json.insert(stringify!($name).into(), make_config!{ @supportstr $name, cfg.$name, $ty, $none_action }); - )+)+; - json - }) - } - - pub fn get_overrides(&self) -> Vec<String> { - let overrides = { - let inner = &self.inner.read().unwrap(); - inner._overrides.clone() - }; - overrides - } } }; - // Support string print ( @supportstr $name:ident, $value:expr, Pass, option ) => { serde_json::to_value($value.as_ref().map(|_| String::from("***"))).unwrap() }; // Optional pass, we map to an Option<String> with "***" ( @supportstr $name:ident, $value:expr, Pass, $none_action:ident ) => { "***".into() }; // Required pass, we return "***" @@ -694,7 +488,10 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { let path = std::path::Path::new(&url); if let Some(parent) = path.parent() { if !parent.is_dir() { - err!(format!("SQLite database directory `{}` does not exist or is not a directory", parent.display())); + err!(format!( + "SQLite database directory `{}` does not exist or is not a directory", + parent.display() + )); } } } @@ -705,15 +502,10 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { let limit = 256; if cfg.database_max_conns < 1 || cfg.database_max_conns > limit { - err!(format!("`DATABASE_MAX_CONNS` contains an invalid value. Ensure it is between 1 and {limit}.",)); + err!(format!( + "`DATABASE_MAX_CONNS` contains an invalid value. 
Ensure it is between 1 and {limit}.", + )); } - - if let Some(log_file) = &cfg.log_file { - if std::fs::OpenOptions::new().append(true).create(true).open(log_file).is_err() { - err!("Unable to write to log file", log_file); - } - } - let dom = cfg.domain.to_lowercase(); if !dom.starts_with("http://") && !dom.starts_with("https://") { err!( @@ -727,50 +519,13 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { } let org_creation_users = cfg.org_creation_users.trim().to_lowercase(); - if !(org_creation_users.is_empty() || org_creation_users == "all" || org_creation_users == "none") + if !(org_creation_users.is_empty() + || org_creation_users == "all" + || org_creation_users == "none") && org_creation_users.split(',').any(|u| !u.contains('@')) { err!("`ORG_CREATION_USERS` contains invalid email addresses"); } - - if let Some(ref token) = cfg.admin_token { - if token.trim().is_empty() && !cfg.disable_admin_token { - println!("[WARNING] `ADMIN_TOKEN` is enabled but has an empty value, so the admin page will be disabled."); - println!("[WARNING] To enable the admin page without a token, use `DISABLE_ADMIN_TOKEN`."); - } - } - - if cfg.push_enabled && (cfg.push_installation_id == String::new() || cfg.push_installation_key == String::new()) { - err!( - "Misconfigured Push Notification service\n\ - ########################################################################################\n\ - # It looks like you enabled Push Notification feature, but didn't configure it #\n\ - # properly. Make sure the installation id and key from https://bitwarden.com/host are #\n\ - # added to your configuration. #\n\ - ########################################################################################\n" - ) - } - - if cfg._enable_duo - && (cfg.duo_host.is_some() || cfg.duo_ikey.is_some() || cfg.duo_skey.is_some()) - && !(cfg.duo_host.is_some() && cfg.duo_ikey.is_some() && cfg.duo_skey.is_some()) - { - err!("All Duo options need to be set for global Duo support") - } - - if cfg._enable_yubico { - if cfg.yubico_client_id.is_some() != cfg.yubico_secret_key.is_some() { - err!("Both `YUBICO_CLIENT_ID` and `YUBICO_SECRET_KEY` must be set for Yubikey OTP support") - } - - if let Some(yubico_server) = &cfg.yubico_server { - let yubico_server = yubico_server.to_lowercase(); - if !yubico_server.starts_with("https://") { - err!("`YUBICO_SERVER` must be a valid URL and start with 'https://'. 
Either unset this variable or provide a valid URL.") - } - } - } - if cfg._enable_smtp { match cfg.smtp_security.as_str() { "off" | "starttls" | "force_tls" => (), @@ -780,7 +535,10 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { } if cfg.use_sendmail { - let command = cfg.sendmail_command.clone().unwrap_or_else(|| format!("sendmail{EXE_SUFFIX}")); + let command = cfg + .sendmail_command + .clone() + .unwrap_or_else(|| format!("sendmail{EXE_SUFFIX}")); let mut path = std::path::PathBuf::from(&command); @@ -796,7 +554,9 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { err!(format!("sendmail command not found at `{path:?}`")) } Err(err) => { - err!(format!("failed to access sendmail command at `{path:?}`: {err}")) + err!(format!( + "failed to access sendmail command at `{path:?}`: {err}" + )) } Ok(metadata) => { if metadata.is_dir() { @@ -825,101 +585,18 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> { if (cfg.smtp_host.is_some() || cfg.use_sendmail) && !cfg.smtp_from.contains('@') { err!("SMTP_FROM does not contain a mandatory @ sign") } - - if cfg._enable_email_2fa && cfg.email_token_size < 6 { - err!("`EMAIL_TOKEN_SIZE` has a minimum size of 6") - } - } - - if cfg._enable_email_2fa && !(cfg.smtp_host.is_some() || cfg.use_sendmail) { - err!("To enable email 2FA, a mail transport must be configured") - } - - // Check if the icon blacklist regex is valid - if let Some(ref r) = cfg.icon_blacklist_regex { - let validate_regex = regex::Regex::new(r); - match validate_regex { - Ok(_) => (), - Err(e) => err!(format!("`ICON_BLACKLIST_REGEX` is invalid: {e:#?}")), - } } - // Check if the icon service is valid let icon_service = cfg.icon_service.as_str(); match icon_service { - "internal" | "bitwarden" | "duckduckgo" | "google" => (), - _ => { - if !icon_service.starts_with("http") { - err!(format!("Icon service URL `{icon_service}` must start with \"http\"")) - } - match icon_service.matches("{}").count() { - 1 => (), // nominal - 0 => err!(format!("Icon service URL `{icon_service}` has no placeholder \"{{}}\"")), - _ => err!(format!("Icon service URL `{icon_service}` has more than one placeholder \"{{}}\"")), - } - } - } - - // Check if the icon redirect code is valid - match cfg.icon_redirect_code { - 301 | 302 | 307 | 308 => (), - _ => err!("Only HTTP 301/302 and 307/308 redirects are supported"), + "internal" => (), + _ => err!(format!( + "Icon service URL `{icon_service}` must start with \"http\"" + )), } - if cfg.invitation_expiration_hours < 1 { err!("`INVITATION_EXPIRATION_HOURS` has a minimum duration of 1 hour") } - - // Validate schedule crontab format - if !cfg.send_purge_schedule.is_empty() && cfg.send_purge_schedule.parse::<Schedule>().is_err() { - err!("`SEND_PURGE_SCHEDULE` is not a valid cron expression") - } - - if !cfg.trash_purge_schedule.is_empty() && cfg.trash_purge_schedule.parse::<Schedule>().is_err() { - err!("`TRASH_PURGE_SCHEDULE` is not a valid cron expression") - } - - if !cfg.incomplete_2fa_schedule.is_empty() && cfg.incomplete_2fa_schedule.parse::<Schedule>().is_err() { - err!("`INCOMPLETE_2FA_SCHEDULE` is not a valid cron expression") - } - - if !cfg.emergency_notification_reminder_schedule.is_empty() - && cfg.emergency_notification_reminder_schedule.parse::<Schedule>().is_err() - { - err!("`EMERGENCY_NOTIFICATION_REMINDER_SCHEDULE` is not a valid cron expression") - } - - if !cfg.emergency_request_timeout_schedule.is_empty() - && cfg.emergency_request_timeout_schedule.parse::<Schedule>().is_err() - { - 
err!("`EMERGENCY_REQUEST_TIMEOUT_SCHEDULE` is not a valid cron expression") - } - - if !cfg.event_cleanup_schedule.is_empty() && cfg.event_cleanup_schedule.parse::<Schedule>().is_err() { - err!("`EVENT_CLEANUP_SCHEDULE` is not a valid cron expression") - } - - if !cfg.auth_request_purge_schedule.is_empty() && cfg.auth_request_purge_schedule.parse::<Schedule>().is_err() { - err!("`AUTH_REQUEST_PURGE_SCHEDULE` is not a valid cron expression") - } - - if !cfg.disable_admin_token { - match cfg.admin_token.as_ref() { - Some(t) if t.starts_with("$argon2") => { - if let Err(e) = argon2::password_hash::PasswordHash::new(t) { - err!(format!("The configured Argon2 PHC in `ADMIN_TOKEN` is invalid: '{e}'")) - } - } - Some(_) => { - println!( - "[NOTICE] You are using a plain text `ADMIN_TOKEN` which is insecure.\n\ - Please generate a secure Argon2 PHC string by using `vaultwarden hash` or `argon2`.\n\ - See: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page#secure-the-admin_token\n" - ); - } - _ => {} - } - } Ok(()) } @@ -983,7 +660,10 @@ fn generate_icon_service_csp(icon_service: &str, icon_service_url: &str) -> Stri } /// Convert the old SMTP_SSL and SMTP_EXPLICIT_TLS options -fn smtp_convert_deprecated_ssl_options(smtp_ssl: Option<bool>, smtp_explicit_tls: Option<bool>) -> String { +fn smtp_convert_deprecated_ssl_options( + smtp_ssl: Option<bool>, + smtp_explicit_tls: Option<bool>, +) -> String { if smtp_explicit_tls.is_some() || smtp_ssl.is_some() { println!("[DEPRECATED]: `SMTP_SSL` or `SMTP_EXPLICIT_TLS` is set. Please use `SMTP_SECURITY` instead."); } @@ -998,18 +678,9 @@ fn smtp_convert_deprecated_ssl_options(smtp_ssl: Option<bool>, smtp_explicit_tls impl Config { pub fn load() -> Result<Self, Error> { - // Loading from env and file let _env = ConfigBuilder::from_env(); - let _usr = ConfigBuilder::from_file(&CONFIG_FILE).unwrap_or_default(); - - // Create merged config, config file overwrites env - let mut _overrides = Vec::new(); - let builder = _env.merge(&_usr, true, &mut _overrides); - - // Fill any missing with defaults - let config = builder.build(); + let config = _env.build(); validate_config(&config)?; - Ok(Config { inner: RwLock::new(Inner { rocket_shutdown_handle: None, @@ -1017,55 +688,9 @@ impl Config { templates: load_templates(&config.templates_folder), config, _env, - _usr, - _overrides, }), }) } - - pub fn update_config(&self, other: ConfigBuilder) -> Result<(), Error> { - // Remove default values - //let builder = other.remove(&self.inner.read().unwrap()._env); - - // TODO: Remove values that are defaults, above only checks those set by env and not the defaults - let builder = other; - - // Serialize now before we consume the builder - let config_str = serde_json::to_string_pretty(&builder)?; - - // Prepare the combined config - let mut overrides = Vec::new(); - let config = { - let env = &self.inner.read().unwrap()._env; - env.merge(&builder, false, &mut overrides).build() - }; - validate_config(&config)?; - - // Save both the user and the combined config - { - let mut writer = self.inner.write().unwrap(); - writer.config = config; - writer._usr = builder; - writer._overrides = overrides; - } - - //Save to file - use std::{fs::File, io::Write}; - let mut file = File::create(&*CONFIG_FILE)?; - file.write_all(config_str.as_bytes())?; - - Ok(()) - } - - fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> { - let builder = { - let usr = &self.inner.read().unwrap()._usr; - let mut _overrides = Vec::new(); - usr.merge(&other, false, &mut 
_overrides) - }; - self.update_config(builder) - } - /// Tests whether an email's domain is allowed. A domain is allowed if it /// is in signups_domains_whitelist, or if no whitelist is set (so there /// are no domain restrictions in effect). @@ -1105,29 +730,6 @@ impl Config { } } - pub fn delete_user_config(&self) -> Result<(), Error> { - crate::util::delete_file(&CONFIG_FILE)?; - - // Empty user config - let usr = ConfigBuilder::default(); - - // Config now is env + defaults - let config = { - let env = &self.inner.read().unwrap()._env; - env.build() - }; - - // Save configs - { - let mut writer = self.inner.write().unwrap(); - writer.config = config; - writer._usr = usr; - writer._overrides = Vec::new(); - } - - Ok(()) - } - pub fn private_rsa_key(&self) -> String { format!("{}.pem", CONFIG.rsa_key_filename()) } @@ -1138,28 +740,9 @@ impl Config { let inner = &self.inner.read().unwrap().config; inner._enable_smtp && (inner.smtp_host.is_some() || inner.use_sendmail) } - - pub fn get_duo_akey(&self) -> String { - if let Some(akey) = self._duo_akey() { - akey - } else { - let akey_s = crate::crypto::encode_random_bytes::<64>(data_encoding::BASE64); - - // Save the new value - let builder = ConfigBuilder { - _duo_akey: Some(akey_s.clone()), - ..Default::default() - }; - self.update_config_partial(builder).ok(); - - akey_s - } - } - /// Tests whether the admin token is set to a non-empty value. pub fn is_admin_token_set(&self) -> bool { let token = self.admin_token(); - token.is_some() && !token.unwrap().trim().is_empty() } @@ -1168,14 +751,8 @@ impl Config { name: &str, data: &T, ) -> Result<String, crate::error::Error> { - if CONFIG.reload_templates() { - warn!("RELOADING TEMPLATES"); - let hb = load_templates(CONFIG.templates_folder()); - hb.render(name, data).map_err(Into::into) - } else { - let hb = &CONFIG.inner.read().unwrap().templates; - hb.render(name, data).map_err(Into::into) - } + let hb = &CONFIG.inner.read().unwrap().templates; + hb.render(name, data).map_err(Into::into) } pub fn set_rocket_shutdown_handle(&self, handle: rocket::Shutdown) { @@ -1199,7 +776,9 @@ impl Config { } } -use handlebars::{Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError, Renderable}; +use handlebars::{ + Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError, Renderable, +}; fn load_templates<P>(path: P) -> Handlebars<'static> where @@ -1279,11 +858,15 @@ fn case_helper<'reg, 'rc>( rc: &mut RenderContext<'reg, 'rc>, out: &mut dyn Output, ) -> HelperResult { - let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"case\""))?; + let param = h + .param(0) + .ok_or_else(|| RenderError::new("Param not found for helper \"case\""))?; let value = param.value().clone(); if h.params().iter().skip(1).any(|x| x.value() == &value) { - h.template().map(|t| t.render(r, ctx, rc, out)).unwrap_or_else(|| Ok(())) + h.template() + .map(|t| t.render(r, ctx, rc, out)) + .unwrap_or_else(|| Ok(())) } else { Ok(()) } @@ -1296,13 +879,21 @@ fn js_escape_helper<'reg, 'rc>( _rc: &mut RenderContext<'reg, 'rc>, out: &mut dyn Output, ) -> HelperResult { - let param = h.param(0).ok_or_else(|| RenderError::new("Param not found for helper \"jsesc\""))?; + let param = h + .param(0) + .ok_or_else(|| RenderError::new("Param not found for helper \"jsesc\""))?; let no_quote = h.param(1).is_some(); - let value = param.value().as_str().ok_or_else(|| RenderError::new("Param for helper \"jsesc\" is not a String"))?; + let value = param + .value() + .as_str() + 
.ok_or_else(|| RenderError::new("Param for helper \"jsesc\" is not a String"))?; - let mut escaped_value = value.replace('\\', "").replace('\'', "\\x22").replace('\"', "\\x27"); + let mut escaped_value = value + .replace('\\', "") + .replace('\'', "\\x22") + .replace('\"', "\\x27"); if !no_quote { escaped_value = format!("&quot;{escaped_value}&quot;"); } @@ -1318,7 +909,10 @@ fn to_json<'reg, 'rc>( _rc: &mut RenderContext<'reg, 'rc>, out: &mut dyn Output, ) -> HelperResult { - let param = h.param(0).ok_or_else(|| RenderError::new("Expected 1 parameter for \"to_json\""))?.value(); + let param = h + .param(0) + .ok_or_else(|| RenderError::new("Expected 1 parameter for \"to_json\""))? + .value(); let json = serde_json::to_string(param) .map_err(|e| RenderError::new(format!("Can't serialize parameter to JSON: {e}")))?; out.write(&json)?; diff --git a/src/crypto.rs b/src/crypto.rs @@ -4,7 +4,7 @@ use std::num::NonZeroU32; use data_encoding::{Encoding, HEXLOWER}; -use ring::{digest, hmac, pbkdf2}; +use ring::{digest, pbkdf2}; static DIGEST_ALG: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256; const OUTPUT_LEN: usize = digest::SHA256_OUTPUT_LEN; @@ -22,17 +22,6 @@ pub fn verify_password_hash(secret: &[u8], salt: &[u8], previous: &[u8], iterati let iterations = NonZeroU32::new(iterations).expect("Iterations can't be zero"); pbkdf2::verify(DIGEST_ALG, iterations, salt, secret, previous).is_ok() } - -// -// HMAC -// -pub fn hmac_sign(key: &str, data: &str) -> String { - let key = hmac::Key::new(hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, key.as_bytes()); - let signature = hmac::sign(&key, data.as_bytes()); - - HEXLOWER.encode(signature.as_ref()) -} - // // Random values // @@ -42,7 +31,9 @@ pub fn get_random_bytes<const N: usize>() -> [u8; N] { use ring::rand::{SecureRandom, SystemRandom}; let mut array = [0; N]; - SystemRandom::new().fill(&mut array).expect("Error generating random values"); + SystemRandom::new() + .fill(&mut array) + .expect("Error generating random values"); array } diff --git a/src/db/mod.rs b/src/db/mod.rs @@ -22,18 +22,9 @@ use crate::{ CONFIG, }; -#[cfg(sqlite)] #[path = "schemas/sqlite/schema.rs"] pub mod __sqlite_schema; -#[cfg(mysql)] -#[path = "schemas/mysql/schema.rs"] -pub mod __mysql_schema; - -#[cfg(postgresql)] -#[path = "schemas/postgresql/schema.rs"] -pub mod __postgresql_schema; - // These changes are based on Rocket 0.5-rc wrapper of Diesel: https://github.com/SergioBenitez/Rocket/blob/v0.5-rc/contrib/sync_db_pools // A wrapper around spawn_blocking that propagates panics to the calling code. @@ -178,46 +169,14 @@ macro_rules! generate_connections { }; } -#[cfg(not(query_logger))] generate_connections! { sqlite: diesel::sqlite::SqliteConnection, mysql: diesel::mysql::MysqlConnection, postgresql: diesel::pg::PgConnection } - -#[cfg(query_logger)] -generate_connections! 
{ - sqlite: diesel_logger::LoggingConnection<diesel::sqlite::SqliteConnection>, - mysql: diesel_logger::LoggingConnection<diesel::mysql::MysqlConnection>, - postgresql: diesel_logger::LoggingConnection<diesel::pg::PgConnection> -} - impl DbConnType { - pub fn from_url(url: &str) -> Result<DbConnType, Error> { - // Mysql - if url.starts_with("mysql:") { - #[cfg(mysql)] - return Ok(DbConnType::mysql); - - #[cfg(not(mysql))] - err!("`DATABASE_URL` is a MySQL URL, but the 'mysql' feature is not enabled") - - // Postgres - } else if url.starts_with("postgresql:") || url.starts_with("postgres:") { - #[cfg(postgresql)] - return Ok(DbConnType::postgresql); - - #[cfg(not(postgresql))] - err!("`DATABASE_URL` is a PostgreSQL URL, but the 'postgresql' feature is not enabled") - - //Sqlite - } else { - #[cfg(sqlite)] - return Ok(DbConnType::sqlite); - - #[cfg(not(sqlite))] - err!("`DATABASE_URL` looks like a SQLite URL, but 'sqlite' feature is not enabled") - } + pub fn from_url(_: &str) -> Result<DbConnType, Error> { + Ok(DbConnType::sqlite) } pub fn get_init_stmts(&self) -> String { @@ -230,11 +189,7 @@ impl DbConnType { } pub fn default_init_stmts(&self) -> String { - match self { - Self::sqlite => "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;".to_string(), - Self::mysql => String::new(), - Self::postgresql => String::new(), - } + "PRAGMA busy_timeout = 5000; PRAGMA synchronous = NORMAL;".to_string() } } @@ -330,13 +285,7 @@ macro_rules! db_object { )+ ) => { // Create the normal struct, without attributes $( pub struct $name { $( /*$( #[$field_attr] )**/ $vis $field : $typ, )+ } )+ - - #[cfg(sqlite)] pub mod __sqlite_model { $( db_object! { @db sqlite | $( #[$attr] )* | $name | $( $( #[$field_attr] )* $field : $typ ),+ } )+ } - #[cfg(mysql)] - pub mod __mysql_model { $( db_object! { @db mysql | $( #[$attr] )* | $name | $( $( #[$field_attr] )* $field : $typ ),+ } )+ } - #[cfg(postgresql)] - pub mod __postgresql_model { $( db_object! { @db postgresql | $( #[$attr] )* | $name | $( $( #[$field_attr] )* $field : $typ ),+ } )+ } }; ( @db $db:ident | $( #[$attr:meta] )* | $name:ident | $( $( #[$field_attr:meta] )* $vis:vis $field:ident : $typ:ty),+) => { @@ -366,44 +315,6 @@ macro_rules! db_object { // Reexport the models, needs to be after the macros are defined so it can access them pub mod models; - -/// Creates a back-up of the sqlite database -/// MySQL/MariaDB and PostgreSQL are not supported. -pub async fn backup_database(conn: &mut DbConn) -> Result<(), Error> { - db_run! {@raw conn: - postgresql, mysql { - let _ = conn; - err!("PostgreSQL and MySQL/MariaDB do not support this backup feature"); - } - sqlite { - use std::path::Path; - let db_url = CONFIG.database_url(); - let db_path = Path::new(&db_url).parent().unwrap().to_string_lossy(); - let file_date = chrono::Utc::now().format("%Y%m%d_%H%M%S").to_string(); - diesel::sql_query(format!("VACUUM INTO '{db_path}/db_{file_date}.sqlite3'")).execute(conn)?; - Ok(()) - } - } -} - -/// Get the SQL Server version -pub async fn get_sql_server_version(conn: &mut DbConn) -> String { - db_run! 
{@raw conn: - postgresql, mysql { - sql_function!{ - fn version() -> diesel::sql_types::Text; - } - diesel::select(version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string()) - } - sqlite { - sql_function!{ - fn sqlite_version() -> diesel::sql_types::Text; - } - diesel::select(sqlite_version()).get_result::<String>(conn).unwrap_or_else(|_| "Unknown".to_string()) - } - } -} - /// Attempts to retrieve a single connection from the managed database pool. If /// no pool is currently managed, fails with an `InternalServerError` status. If /// no connections are available, fails with a `ServiceUnavailable` status. @@ -413,16 +324,15 @@ impl<'r> FromRequest<'r> for DbConn { async fn from_request(request: &'r Request<'_>) -> Outcome<Self, Self::Error> { match request.rocket().state::<DbPool>() { - Some(p) => p.get().await.map_err(|_| ()).into_outcome(Status::ServiceUnavailable), + Some(p) => p + .get() + .await + .map_err(|_| ()) + .into_outcome(Status::ServiceUnavailable), None => Outcome::Failure((Status::InternalServerError, ())), } } } - -// Embed the migrations from the migrations folder into the application -// This way, the program automatically migrates the database to the latest version -// https://docs.rs/diesel_migrations/*/diesel_migrations/macro.embed_migrations.html -#[cfg(sqlite)] mod sqlite_migrations { use diesel_migrations::{EmbeddedMigrations, MigrationHarness}; pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/sqlite"); @@ -444,45 +354,14 @@ mod sqlite_migrations { // Turn on WAL in SQLite if crate::CONFIG.enable_db_wal() { - diesel::sql_query("PRAGMA journal_mode=wal").execute(&mut connection).expect("Failed to turn on WAL"); + diesel::sql_query("PRAGMA journal_mode=wal") + .execute(&mut connection) + .expect("Failed to turn on WAL"); } - connection.run_pending_migrations(MIGRATIONS).expect("Error running migrations"); - Ok(()) - } -} - -#[cfg(mysql)] -mod mysql_migrations { - use diesel_migrations::{EmbeddedMigrations, MigrationHarness}; - pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/mysql"); - - pub fn run_migrations() -> Result<(), super::Error> { - use diesel::{Connection, RunQueryDsl}; - // Make sure the database is up to date (create if it doesn't exist, or run the migrations) - let mut connection = diesel::mysql::MysqlConnection::establish(&crate::CONFIG.database_url())?; - // Disable Foreign Key Checks during migration - - // Scoped to a connection/session. 
- diesel::sql_query("SET FOREIGN_KEY_CHECKS = 0") - .execute(&mut connection) - .expect("Failed to disable Foreign Key Checks during migrations"); - - connection.run_pending_migrations(MIGRATIONS).expect("Error running migrations"); - Ok(()) - } -} - -#[cfg(postgresql)] -mod postgresql_migrations { - use diesel_migrations::{EmbeddedMigrations, MigrationHarness}; - pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/postgresql"); - - pub fn run_migrations() -> Result<(), super::Error> { - use diesel::Connection; - // Make sure the database is up to date (create if it doesn't exist, or run the migrations) - let mut connection = diesel::pg::PgConnection::establish(&crate::CONFIG.database_url())?; - connection.run_pending_migrations(MIGRATIONS).expect("Error running migrations"); + connection + .run_pending_migrations(MIGRATIONS) + .expect("Error running migrations"); Ok(()) } } diff --git a/src/db/models/cipher.rs b/src/db/models/cipher.rs @@ -3,7 +3,8 @@ use chrono::{Duration, NaiveDateTime, Utc}; use serde_json::Value; use super::{ - Attachment, CollectionCipher, Favorite, FolderCipher, Group, User, UserOrgStatus, UserOrgType, UserOrganization, + Attachment, CollectionCipher, Favorite, FolderCipher, Group, User, UserOrgStatus, UserOrgType, + UserOrganization, }; use crate::api::core::{CipherData, CipherSyncData, CipherSyncType}; @@ -135,19 +136,26 @@ impl Cipher { } } - let fields_json = self.fields.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null); - let password_history_json = - self.password_history.as_ref().and_then(|s| serde_json::from_str(s).ok()).unwrap_or(Value::Null); + let fields_json = self + .fields + .as_ref() + .and_then(|s| serde_json::from_str(s).ok()) + .unwrap_or(Value::Null); + let password_history_json = self + .password_history + .as_ref() + .and_then(|s| serde_json::from_str(s).ok()) + .unwrap_or(Value::Null); // We don't need these values at all for Organizational syncs // Skip any other database calls if this is the case and just return false. let (read_only, hide_passwords) = if sync_type == CipherSyncType::User { - match self.get_access_restrictions(user_uuid, cipher_sync_data, conn).await { + match self + .get_access_restrictions(user_uuid, cipher_sync_data, conn) + .await + { Some((ro, hp)) => (ro, hp), - None => { - error!("Cipher ownership assertion failure"); - (true, true) - } + None => (true, true), } } else { (false, false) @@ -155,8 +163,8 @@ impl Cipher { // Get the type_data or a default to an empty json object '{}'. // If not passing an empty object, mobile clients will crash. - let mut type_data_json: Value = - serde_json::from_str(&self.data).unwrap_or_else(|_| Value::Object(serde_json::Map::new())); + let mut type_data_json: Value = serde_json::from_str(&self.data) + .unwrap_or_else(|_| Value::Object(serde_json::Map::new())); // NOTE: This was marked as *Backwards Compatibility Code*, but as of January 2021 this is still being used by upstream // Set the first element of the Uris array as Uri, this is needed several (mobile) clients. 
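
The fields/password_history decoding in the cipher.rs hunk above is deliberately lenient: a missing or malformed JSON column degrades to Value::Null instead of failing the whole sync. A minimal self-contained sketch of that pattern (the parse_or_null name is illustrative, not from this codebase):

use serde_json::Value;

// Decode an optional JSON column, falling back to Null on absence or parse failure.
fn parse_or_null(raw: Option<&str>) -> Value {
    raw.and_then(|s| serde_json::from_str::<Value>(s).ok())
        .unwrap_or(Value::Null)
}

fn main() {
    assert_eq!(parse_or_null(None), Value::Null);
    assert_eq!(parse_or_null(Some("not json")), Value::Null);
    assert_eq!(parse_or_null(Some("[1,2]")), serde_json::json!([1, 2]));
}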
@@ -235,7 +243,10 @@ impl Cipher { // Skip adding these fields in that case if sync_type == CipherSyncType::User { json_object["FolderId"] = json!(if let Some(cipher_sync_data) = cipher_sync_data { - cipher_sync_data.cipher_folders.get(&self.uuid).map(|c| c.to_string()) + cipher_sync_data + .cipher_folders + .get(&self.uuid) + .map(|c| c.to_string()) } else { self.get_folder_uuid(user_uuid, conn).await }); @@ -273,7 +284,11 @@ impl Cipher { None => { // Belongs to Organization, need to update affected users if let Some(ref org_uuid) = self.organization_uuid { - for user_org in UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn).await.iter() { + for user_org in + UserOrganization::find_by_cipher_and_org(&self.uuid, org_uuid, conn) + .await + .iter() + { User::update_uuid_revision(&user_org.user_uuid, conn).await; user_uuids.push(user_org.user_uuid.clone()) } @@ -359,7 +374,12 @@ impl Cipher { } } - pub async fn move_to_folder(&self, folder_uuid: Option<String>, user_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn move_to_folder( + &self, + folder_uuid: Option<String>, + user_uuid: &str, + conn: &mut DbConn, + ) -> EmptyResult { User::update_uuid_revision(user_uuid, conn).await; match (self.get_folder_uuid(user_uuid, conn).await, folder_uuid) { @@ -371,14 +391,18 @@ impl Cipher { (None, Some(new)) => FolderCipher::new(&new, &self.uuid).save(conn).await, // Remove from folder - (Some(old), None) => match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await { - Some(old) => old.delete(conn).await, - None => err!("Couldn't move from previous folder"), - }, + (Some(old), None) => { + match FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await { + Some(old) => old.delete(conn).await, + None => err!("Couldn't move from previous folder"), + } + } // Move to another folder (Some(old), Some(new)) => { - if let Some(old) = FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await { + if let Some(old) = + FolderCipher::find_by_folder_and_cipher(&old, &self.uuid, conn).await + { old.delete(conn).await?; } FolderCipher::new(&new, &self.uuid).save(conn).await @@ -403,7 +427,9 @@ impl Cipher { if let Some(cached_user_org) = cipher_sync_data.user_organizations.get(org_uuid) { return cached_user_org.has_full_access(); } - } else if let Some(user_org) = UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn).await { + } else if let Some(user_org) = + UserOrganization::find_by_user_and_org(user_uuid, org_uuid, conn).await + { return user_org.has_full_access(); } } @@ -419,7 +445,10 @@ impl Cipher { ) -> bool { if let Some(ref org_uuid) = self.organization_uuid { if let Some(cipher_sync_data) = cipher_sync_data { - return cipher_sync_data.user_group_full_access_for_organizations.get(org_uuid).is_some(); + return cipher_sync_data + .user_group_full_access_for_organizations + .get(org_uuid) + .is_some(); } else { return Group::is_in_full_access_group(user_uuid, org_uuid, conn).await; } @@ -442,8 +471,12 @@ impl Cipher { // a collection that the user has full access to. If so, there are no // access restrictions. 
if self.is_owned_by_user(user_uuid) - || self.is_in_full_access_org(user_uuid, cipher_sync_data, conn).await - || self.is_in_full_access_group(user_uuid, cipher_sync_data, conn).await + || self + .is_in_full_access_org(user_uuid, cipher_sync_data, conn) + .await + || self + .is_in_full_access_group(user_uuid, cipher_sync_data, conn) + .await { return Some((false, false)); } @@ -465,8 +498,14 @@ impl Cipher { } rows } else { - let mut access_flags = self.get_user_collections_access_flags(user_uuid, conn).await; - access_flags.append(&mut self.get_group_collections_access_flags(user_uuid, conn).await); + let mut access_flags = self + .get_user_collections_access_flags(user_uuid, conn) + .await; + access_flags.append( + &mut self + .get_group_collections_access_flags(user_uuid, conn) + .await, + ); access_flags }; @@ -494,7 +533,11 @@ impl Cipher { Some((read_only, hide_passwords)) } - async fn get_user_collections_access_flags(&self, user_uuid: &str, conn: &mut DbConn) -> Vec<(bool, bool)> { + async fn get_user_collections_access_flags( + &self, + user_uuid: &str, + conn: &mut DbConn, + ) -> Vec<(bool, bool)> { db_run! {conn: { // Check whether this cipher is in any collections accessible to the // user. If so, retrieve the access flags for each collection. @@ -511,7 +554,11 @@ impl Cipher { }} } - async fn get_group_collections_access_flags(&self, user_uuid: &str, conn: &mut DbConn) -> Vec<(bool, bool)> { + async fn get_group_collections_access_flags( + &self, + user_uuid: &str, + conn: &mut DbConn, + ) -> Vec<(bool, bool)> { db_run! {conn: { ciphers::table .filter(ciphers::uuid.eq(&self.uuid)) @@ -542,7 +589,9 @@ impl Cipher { } pub async fn is_accessible_to_user(&self, user_uuid: &str, conn: &mut DbConn) -> bool { - self.get_access_restrictions(user_uuid, None, conn).await.is_some() + self.get_access_restrictions(user_uuid, None, conn) + .await + .is_some() } // Returns whether this cipher is a favorite of the specified user. @@ -551,7 +600,12 @@ impl Cipher { } // Sets whether this cipher is a favorite of the specified user. - pub async fn set_favorite(&self, favorite: Option<bool>, user_uuid: &str, conn: &mut DbConn) -> EmptyResult { + pub async fn set_favorite( + &self, + favorite: Option<bool>, + user_uuid: &str, + conn: &mut DbConn, + ) -> EmptyResult { match favorite { None => Ok(()), // No change requested. Some(status) => Favorite::set_favorite(status, &self.uuid, user_uuid, conn).await, @@ -733,7 +787,10 @@ impl Cipher { /// Return a Vec with (cipher_uuid, collection_uuid) /// This is used during a full sync so we only need one query for all collections accessible. - pub async fn get_collections_with_cipher_by_user(user_id: String, conn: &mut DbConn) -> Vec<(String, String)> { + pub async fn get_collections_with_cipher_by_user( + user_id: String, + conn: &mut DbConn, + ) -> Vec<(String, String)> { db_run! {conn: { ciphers_collections::table .inner_join(collections::table.on( diff --git a/src/db/models/org_policy.rs b/src/db/models/org_policy.rs @@ -179,7 +179,11 @@ impl OrgPolicy { }} } - pub async fn find_by_org_and_type(org_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> Option<Self> { + pub async fn find_by_org_and_type( + org_uuid: &str, + policy_type: OrgPolicyType, + conn: &mut DbConn, + ) -> Option<Self> { db_run! 
{ conn: { org_policies::table .filter(org_policies::org_uuid.eq(org_uuid)) @@ -258,15 +262,21 @@ impl OrgPolicy { exclude_org_uuid: Option<&str>, conn: &mut DbConn, ) -> bool { - for policy in - OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy(user_uuid, policy_type, conn).await + for policy in OrgPolicy::find_accepted_and_confirmed_by_user_and_active_policy( + user_uuid, + policy_type, + conn, + ) + .await { // Check if we need to skip this organization. if exclude_org_uuid.is_some() && exclude_org_uuid.unwrap() == policy.org_uuid { continue; } - if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await { + if let Some(user) = + UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await + { if user.atype < UserOrgType::Admin { return true; } @@ -283,7 +293,9 @@ impl OrgPolicy { ) -> OrgPolicyResult { // Enforce TwoFactor/TwoStep login if TwoFactor::find_by_user(user_uuid, conn).await.is_empty() { - match Self::find_by_org_and_type(org_uuid, OrgPolicyType::TwoFactorAuthentication, conn).await { + match Self::find_by_org_and_type(org_uuid, OrgPolicyType::TwoFactorAuthentication, conn) + .await + { Some(p) if p.enabled => { return Err(OrgPolicyErr::TwoFactorMissing); } @@ -298,7 +310,8 @@ impl OrgPolicy { } else { None }; - if Self::is_applicable_to_user(user_uuid, OrgPolicyType::SingleOrg, exclude_org, conn).await { + if Self::is_applicable_to_user(user_uuid, OrgPolicyType::SingleOrg, exclude_org, conn).await + { return Err(OrgPolicyErr::SingleOrgEnforced); } @@ -307,12 +320,13 @@ impl OrgPolicy { pub async fn org_is_reset_password_auto_enroll(org_uuid: &str, conn: &mut DbConn) -> bool { match OrgPolicy::find_by_org_and_type(org_uuid, OrgPolicyType::ResetPassword, conn).await { - Some(policy) => match serde_json::from_str::<UpCase<ResetPasswordDataModel>>(&policy.data) { - Ok(opts) => { + Some(policy) => { + if let Ok(opts) = + serde_json::from_str::<UpCase<ResetPasswordDataModel>>(&policy.data) + { return policy.enabled && opts.data.AutoEnrollEnabled; } - _ => error!("Failed to deserialize ResetPasswordDataModel: {}", policy.data), - }, + } None => return false, } @@ -322,18 +336,23 @@ impl OrgPolicy { /// Returns true if the user belongs to an org that has enabled the `DisableHideEmail` /// option of the `Send Options` policy, and the user is not an owner or admin of that org. 
pub async fn is_hide_email_disabled(user_uuid: &str, conn: &mut DbConn) -> bool { - for policy in - OrgPolicy::find_confirmed_by_user_and_active_policy(user_uuid, OrgPolicyType::SendOptions, conn).await + for policy in OrgPolicy::find_confirmed_by_user_and_active_policy( + user_uuid, + OrgPolicyType::SendOptions, + conn, + ) + .await { - if let Some(user) = UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await { + if let Some(user) = + UserOrganization::find_by_user_and_org(user_uuid, &policy.org_uuid, conn).await + { if user.atype < UserOrgType::Admin { - match serde_json::from_str::<UpCase<SendOptionsPolicyData>>(&policy.data) { - Ok(opts) => { - if opts.data.DisableHideEmail { - return true; - } + if let Ok(opts) = + serde_json::from_str::<UpCase<SendOptionsPolicyData>>(&policy.data) + { + if opts.data.DisableHideEmail { + return true; } - _ => error!("Failed to deserialize SendOptionsPolicyData: {}", policy.data), } } } diff --git a/src/db/models/organization.rs b/src/db/models/organization.rs @@ -112,7 +112,10 @@ impl PartialOrd<i32> for UserOrgType { } fn ge(&self, other: &i32) -> bool { - matches!(self.partial_cmp(other), Some(Ordering::Greater) | Some(Ordering::Equal)) + matches!( + self.partial_cmp(other), + Some(Ordering::Greater) | Some(Ordering::Equal) + ) } } @@ -135,13 +138,21 @@ impl PartialOrd<UserOrgType> for i32 { } fn le(&self, other: &UserOrgType) -> bool { - matches!(self.partial_cmp(other), Some(Ordering::Less) | Some(Ordering::Equal) | None) + matches!( + self.partial_cmp(other), + Some(Ordering::Less) | Some(Ordering::Equal) | None + ) } } /// Local methods impl Organization { - pub fn new(name: String, billing_email: String, private_key: Option<String>, public_key: Option<String>) -> Self { + pub fn new( + name: String, + billing_email: String, + private_key: Option<String>, + public_key: Option<String>, + ) -> Self { Self { uuid: crate::util::get_uuid(), name, @@ -162,8 +173,8 @@ impl Organization { "MaxStorageGb": 10, // The value doesn't matter, we don't check server-side "Use2fa": true, "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet) - "UseEvents": CONFIG.org_events_enabled(), - "UseGroups": CONFIG.org_groups_enabled(), + "UseEvents": false, + "UseGroups": false, "UseTotp": true, "UsePolicies": true, // "UseScim": false, // Not supported (Not AGPLv3 Licensed) @@ -269,7 +280,10 @@ use crate::error::MapResult; impl Organization { pub async fn save(&self, conn: &mut DbConn) -> EmptyResult { if !email_address::EmailAddress::is_valid(self.billing_email.trim()) { - err!(format!("BillingEmail {} is not a valid email address", self.billing_email.trim())) + err!(format!( + "BillingEmail {} is not a valid email address", + self.billing_email.trim() + )) } for user_org in UserOrganization::find_by_org(&self.uuid, conn).await.iter() { @@ -342,7 +356,9 @@ impl Organization { impl UserOrganization { pub async fn to_json(&self, conn: &mut DbConn) -> Value { - let org = Organization::find_by_uuid(&self.org_uuid, conn).await.unwrap(); + let org = Organization::find_by_uuid(&self.org_uuid, conn) + .await + .unwrap(); // https://github.com/bitwarden/server/blob/13d1e74d6960cf0d042620b72d85bf583a4236f7/src/Api/Models/Response/ProfileOrganizationResponseModel.cs json!({ @@ -354,8 +370,8 @@ impl UserOrganization { "UsersGetPremium": true, "Use2fa": true, "UseDirectory": false, // Is supported, but this value isn't checked anywhere (yet) - "UseEvents": CONFIG.org_events_enabled(), - "UseGroups": 
CONFIG.org_groups_enabled(), + "UseEvents": false, + "UseGroups": false, "UseTotp": true, // "UseScim": false, // Not supported (Not AGPLv3 Licensed) "UsePolicies": true, @@ -409,7 +425,7 @@ impl UserOrganization { pub async fn to_json_user_details( &self, include_collections: bool, - include_groups: bool, + _: bool, conn: &mut DbConn, ) -> Value { let user = User::find_by_uuid(&self.user_uuid, conn).await.unwrap(); @@ -423,27 +439,23 @@ impl UserOrganization { }; let twofactor_enabled = !TwoFactor::find_by_user(&user.uuid, conn).await.is_empty(); - - let groups: Vec<String> = if include_groups && CONFIG.org_groups_enabled() { - GroupUser::find_by_user(&self.uuid, conn).await.iter().map(|gu| gu.groups_uuid.clone()).collect() - } else { - // The Bitwarden clients seem to call this API regardless of whether groups are enabled, - // so just act as if there are no groups. - Vec::with_capacity(0) - }; - + let groups: Vec<String> = Vec::new(); let collections: Vec<Value> = if include_collections { - CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn) - .await - .iter() - .map(|cu| { - json!({ - "Id": cu.collection_uuid, - "ReadOnly": cu.read_only, - "HidePasswords": cu.hide_passwords, - }) + CollectionUser::find_by_organization_and_user_uuid( + &self.org_uuid, + &self.user_uuid, + conn, + ) + .await + .iter() + .map(|cu| { + json!({ + "Id": cu.collection_uuid, + "ReadOnly": cu.read_only, + "HidePasswords": cu.hide_passwords, }) - .collect() + }) + .collect() } else { Vec::with_capacity(0) }; @@ -479,8 +491,12 @@ impl UserOrganization { let coll_uuids = if self.access_all { vec![] // If we have complete access, no need to fill the array } else { - let collections = - CollectionUser::find_by_organization_and_user_uuid(&self.org_uuid, &self.user_uuid, conn).await; + let collections = CollectionUser::find_by_organization_and_user_uuid( + &self.org_uuid, + &self.user_uuid, + conn, + ) + .await; collections .iter() .map(|c| { @@ -574,9 +590,15 @@ impl UserOrganization { Ok(()) } - pub async fn find_by_email_and_org(email: &str, org_id: &str, conn: &mut DbConn) -> Option<UserOrganization> { + pub async fn find_by_email_and_org( + email: &str, + org_id: &str, + conn: &mut DbConn, + ) -> Option<UserOrganization> { if let Some(user) = super::User::find_by_mail(email, conn).await { - if let Some(user_org) = UserOrganization::find_by_user_and_org(&user.uuid, org_id, conn).await { + if let Some(user_org) = + UserOrganization::find_by_user_and_org(&user.uuid, org_id, conn).await + { return Some(user_org); } } @@ -593,7 +615,8 @@ impl UserOrganization { } pub fn has_full_access(&self) -> bool { - (self.access_all || self.atype >= UserOrgType::Admin) && self.has_status(UserOrgStatus::Confirmed) + (self.access_all || self.atype >= UserOrgType::Admin) + && self.has_status(UserOrgStatus::Confirmed) } pub async fn find_by_uuid(uuid: &str, conn: &mut DbConn) -> Option<Self> { @@ -605,7 +628,11 @@ impl UserOrganization { }} } - pub async fn find_by_uuid_and_org(uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> { + pub async fn find_by_uuid_and_org( + uuid: &str, + org_uuid: &str, + conn: &mut DbConn, + ) -> Option<Self> { db_run! 
{ conn: { users_organizations::table .filter(users_organizations::uuid.eq(uuid)) @@ -676,7 +703,11 @@ impl UserOrganization { }} } - pub async fn find_by_org_and_type(org_uuid: &str, atype: UserOrgType, conn: &mut DbConn) -> Vec<Self> { + pub async fn find_by_org_and_type( + org_uuid: &str, + atype: UserOrgType, + conn: &mut DbConn, + ) -> Vec<Self> { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -686,7 +717,11 @@ impl UserOrganization { }} } - pub async fn count_confirmed_by_org_and_type(org_uuid: &str, atype: UserOrgType, conn: &mut DbConn) -> i64 { + pub async fn count_confirmed_by_org_and_type( + org_uuid: &str, + atype: UserOrgType, + conn: &mut DbConn, + ) -> i64 { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -698,7 +733,11 @@ impl UserOrganization { }} } - pub async fn find_by_user_and_org(user_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> { + pub async fn find_by_user_and_org( + user_uuid: &str, + org_uuid: &str, + conn: &mut DbConn, + ) -> Option<Self> { db_run! { conn: { users_organizations::table .filter(users_organizations::user_uuid.eq(user_uuid)) @@ -727,7 +766,11 @@ impl UserOrganization { }} } - pub async fn find_by_user_and_policy(user_uuid: &str, policy_type: OrgPolicyType, conn: &mut DbConn) -> Vec<Self> { + pub async fn find_by_user_and_policy( + user_uuid: &str, + policy_type: OrgPolicyType, + conn: &mut DbConn, + ) -> Vec<Self> { db_run! { conn: { users_organizations::table .inner_join( @@ -746,7 +789,11 @@ impl UserOrganization { }} } - pub async fn find_by_cipher_and_org(cipher_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Vec<Self> { + pub async fn find_by_cipher_and_org( + cipher_uuid: &str, + org_uuid: &str, + conn: &mut DbConn, + ) -> Vec<Self> { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -769,7 +816,11 @@ impl UserOrganization { }} } - pub async fn user_has_ge_admin_access_to_cipher(user_uuid: &str, cipher_uuid: &str, conn: &mut DbConn) -> bool { + pub async fn user_has_ge_admin_access_to_cipher( + user_uuid: &str, + cipher_uuid: &str, + conn: &mut DbConn, + ) -> bool { db_run! { conn: { users_organizations::table .inner_join(ciphers::table.on(ciphers::uuid.eq(cipher_uuid).and(ciphers::organization_uuid.eq(users_organizations::org_uuid.nullable())))) @@ -781,7 +832,11 @@ impl UserOrganization { }} } - pub async fn find_by_collection_and_org(collection_uuid: &str, org_uuid: &str, conn: &mut DbConn) -> Vec<Self> { + pub async fn find_by_collection_and_org( + collection_uuid: &str, + org_uuid: &str, + conn: &mut DbConn, + ) -> Vec<Self> { db_run! { conn: { users_organizations::table .filter(users_organizations::org_uuid.eq(org_uuid)) @@ -798,7 +853,11 @@ impl UserOrganization { }} } - pub async fn find_by_external_id_and_org(ext_id: &str, org_uuid: &str, conn: &mut DbConn) -> Option<Self> { + pub async fn find_by_external_id_and_org( + ext_id: &str, + org_uuid: &str, + conn: &mut DbConn, + ) -> Option<Self> { db_run! {conn: { users_organizations::table .filter( diff --git a/src/db/models/two_factor.rs b/src/db/models/two_factor.rs @@ -20,18 +20,7 @@ db_object! 
{ #[derive(num_derive::FromPrimitive)] pub enum TwoFactorType { Authenticator = 0, - Email = 1, - Duo = 2, - YubiKey = 3, - U2f = 4, - Remember = 5, - OrganizationDuo = 6, Webauthn = 7, - - // These are implementation details - U2fRegisterChallenge = 1000, - U2fLoginChallenge = 1001, - EmailVerificationChallenge = 1002, WebauthnRegisterChallenge = 1003, WebauthnLoginChallenge = 1004, } @@ -126,7 +115,11 @@ impl TwoFactor { }} } - pub async fn find_by_user_and_type(user_uuid: &str, atype: i32, conn: &mut DbConn) -> Option<Self> { + pub async fn find_by_user_and_type( + user_uuid: &str, + atype: i32, + conn: &mut DbConn, + ) -> Option<Self> { db_run! { conn: { twofactor::table .filter(twofactor::user_uuid.eq(user_uuid)) @@ -144,73 +137,4 @@ impl TwoFactor { .map_res("Error deleting twofactors") }} } - - pub async fn migrate_u2f_to_webauthn(conn: &mut DbConn) -> EmptyResult { - let u2f_factors = db_run! { conn: { - twofactor::table - .filter(twofactor::atype.eq(TwoFactorType::U2f as i32)) - .load::<TwoFactorDb>(conn) - .expect("Error loading twofactor") - .from_db() - }}; - - use crate::api::core::two_factor::webauthn::U2FRegistration; - use crate::api::core::two_factor::webauthn::{get_webauthn_registrations, WebauthnRegistration}; - use webauthn_rs::proto::*; - - for mut u2f in u2f_factors { - let mut regs: Vec<U2FRegistration> = serde_json::from_str(&u2f.data)?; - // If there are no registrations or they are migrated (we do the migration in batch so we can consider them all migrated when the first one is) - if regs.is_empty() || regs[0].migrated == Some(true) { - continue; - } - - let (_, mut webauthn_regs) = get_webauthn_registrations(&u2f.user_uuid, conn).await?; - - // If the user already has webauthn registrations saved, don't overwrite them - if !webauthn_regs.is_empty() { - continue; - } - - for reg in &mut regs { - let x: [u8; 32] = reg.reg.pub_key[1..33].try_into().unwrap(); - let y: [u8; 32] = reg.reg.pub_key[33..65].try_into().unwrap(); - - let key = COSEKey { - type_: COSEAlgorithm::ES256, - key: COSEKeyType::EC_EC2(COSEEC2Key { - curve: ECDSACurve::SECP256R1, - x, - y, - }), - }; - - let new_reg = WebauthnRegistration { - id: reg.id, - migrated: true, - name: reg.name.clone(), - credential: Credential { - counter: reg.counter, - verified: false, - cred: key, - cred_id: reg.reg.key_handle.clone(), - registration_policy: UserVerificationPolicy::Discouraged, - }, - }; - - webauthn_regs.push(new_reg); - - reg.migrated = Some(true); - } - - u2f.data = serde_json::to_string(&regs)?; - u2f.save(conn).await?; - - TwoFactor::new(u2f.user_uuid.clone(), TwoFactorType::Webauthn, serde_json::to_string(&webauthn_regs)?) - .save(conn) - .await?; - } - - Ok(()) - } } diff --git a/src/db/schemas/mysql/schema.rs b/src/db/schemas/mysql/schema.rs @@ -1,363 +0,0 @@ -table! { - attachments (id) { - id -> Text, - cipher_uuid -> Text, - file_name -> Text, - file_size -> Integer, - akey -> Nullable<Text>, - } -} - -table! { - ciphers (uuid) { - uuid -> Text, - created_at -> Datetime, - updated_at -> Datetime, - user_uuid -> Nullable<Text>, - organization_uuid -> Nullable<Text>, - key -> Nullable<Text>, - atype -> Integer, - name -> Text, - notes -> Nullable<Text>, - fields -> Nullable<Text>, - data -> Text, - password_history -> Nullable<Text>, - deleted_at -> Nullable<Datetime>, - reprompt -> Nullable<Integer>, - } -} - -table! { - ciphers_collections (cipher_uuid, collection_uuid) { - cipher_uuid -> Text, - collection_uuid -> Text, - } -} - -table! 
{ - collections (uuid) { - uuid -> Text, - org_uuid -> Text, - name -> Text, - external_id -> Nullable<Text>, - } -} - -table! { - devices (uuid, user_uuid) { - uuid -> Text, - created_at -> Datetime, - updated_at -> Datetime, - user_uuid -> Text, - name -> Text, - atype -> Integer, - push_uuid -> Nullable<Text>, - push_token -> Nullable<Text>, - refresh_token -> Text, - twofactor_remember -> Nullable<Text>, - } -} - -table! { - event (uuid) { - uuid -> Varchar, - event_type -> Integer, - user_uuid -> Nullable<Varchar>, - org_uuid -> Nullable<Varchar>, - cipher_uuid -> Nullable<Varchar>, - collection_uuid -> Nullable<Varchar>, - group_uuid -> Nullable<Varchar>, - org_user_uuid -> Nullable<Varchar>, - act_user_uuid -> Nullable<Varchar>, - device_type -> Nullable<Integer>, - ip_address -> Nullable<Text>, - event_date -> Timestamp, - policy_uuid -> Nullable<Varchar>, - provider_uuid -> Nullable<Varchar>, - provider_user_uuid -> Nullable<Varchar>, - provider_org_uuid -> Nullable<Varchar>, - } -} - -table! { - favorites (user_uuid, cipher_uuid) { - user_uuid -> Text, - cipher_uuid -> Text, - } -} - -table! { - folders (uuid) { - uuid -> Text, - created_at -> Datetime, - updated_at -> Datetime, - user_uuid -> Text, - name -> Text, - } -} - -table! { - folders_ciphers (cipher_uuid, folder_uuid) { - cipher_uuid -> Text, - folder_uuid -> Text, - } -} - -table! { - invitations (email) { - email -> Text, - } -} - -table! { - org_policies (uuid) { - uuid -> Text, - org_uuid -> Text, - atype -> Integer, - enabled -> Bool, - data -> Text, - } -} - -table! { - organizations (uuid) { - uuid -> Text, - name -> Text, - billing_email -> Text, - private_key -> Nullable<Text>, - public_key -> Nullable<Text>, - } -} - -table! { - sends (uuid) { - uuid -> Text, - user_uuid -> Nullable<Text>, - organization_uuid -> Nullable<Text>, - name -> Text, - notes -> Nullable<Text>, - atype -> Integer, - data -> Text, - akey -> Text, - password_hash -> Nullable<Binary>, - password_salt -> Nullable<Binary>, - password_iter -> Nullable<Integer>, - max_access_count -> Nullable<Integer>, - access_count -> Integer, - creation_date -> Datetime, - revision_date -> Datetime, - expiration_date -> Nullable<Datetime>, - deletion_date -> Datetime, - disabled -> Bool, - hide_email -> Nullable<Bool>, - } -} - -table! { - twofactor (uuid) { - uuid -> Text, - user_uuid -> Text, - atype -> Integer, - enabled -> Bool, - data -> Text, - last_used -> Integer, - } -} - -table! { - twofactor_incomplete (user_uuid, device_uuid) { - user_uuid -> Text, - device_uuid -> Text, - device_name -> Text, - login_time -> Timestamp, - ip_address -> Text, - } -} - -table! 
{ - users (uuid) { - uuid -> Text, - enabled -> Bool, - created_at -> Datetime, - updated_at -> Datetime, - verified_at -> Nullable<Datetime>, - last_verifying_at -> Nullable<Datetime>, - login_verify_count -> Integer, - email -> Text, - email_new -> Nullable<Text>, - email_new_token -> Nullable<Text>, - name -> Text, - password_hash -> Binary, - salt -> Binary, - password_iterations -> Integer, - password_hint -> Nullable<Text>, - akey -> Text, - private_key -> Nullable<Text>, - public_key -> Nullable<Text>, - totp_secret -> Nullable<Text>, - totp_recover -> Nullable<Text>, - security_stamp -> Text, - stamp_exception -> Nullable<Text>, - equivalent_domains -> Text, - excluded_globals -> Text, - client_kdf_type -> Integer, - client_kdf_iter -> Integer, - client_kdf_memory -> Nullable<Integer>, - client_kdf_parallelism -> Nullable<Integer>, - api_key -> Nullable<Text>, - avatar_color -> Nullable<Text>, - external_id -> Nullable<Text>, - } -} - -table! { - users_collections (user_uuid, collection_uuid) { - user_uuid -> Text, - collection_uuid -> Text, - read_only -> Bool, - hide_passwords -> Bool, - } -} - -table! { - users_organizations (uuid) { - uuid -> Text, - user_uuid -> Text, - org_uuid -> Text, - access_all -> Bool, - akey -> Text, - status -> Integer, - atype -> Integer, - reset_password_key -> Nullable<Text>, - external_id -> Nullable<Text>, - } -} - -table! { - organization_api_key (uuid, org_uuid) { - uuid -> Text, - org_uuid -> Text, - atype -> Integer, - api_key -> Text, - revision_date -> Timestamp, - } -} - -table! { - emergency_access (uuid) { - uuid -> Text, - grantor_uuid -> Text, - grantee_uuid -> Nullable<Text>, - email -> Nullable<Text>, - key_encrypted -> Nullable<Text>, - atype -> Integer, - status -> Integer, - wait_time_days -> Integer, - recovery_initiated_at -> Nullable<Timestamp>, - last_notification_at -> Nullable<Timestamp>, - updated_at -> Timestamp, - created_at -> Timestamp, - } -} - -table! { - groups (uuid) { - uuid -> Text, - organizations_uuid -> Text, - name -> Text, - access_all -> Bool, - external_id -> Nullable<Text>, - creation_date -> Timestamp, - revision_date -> Timestamp, - } -} - -table! { - groups_users (groups_uuid, users_organizations_uuid) { - groups_uuid -> Text, - users_organizations_uuid -> Text, - } -} - -table! { - collections_groups (collections_uuid, groups_uuid) { - collections_uuid -> Text, - groups_uuid -> Text, - read_only -> Bool, - hide_passwords -> Bool, - } -} - -table! 
{ - auth_requests (uuid) { - uuid -> Text, - user_uuid -> Text, - organization_uuid -> Nullable<Text>, - request_device_identifier -> Text, - device_type -> Integer, - request_ip -> Text, - response_device_id -> Nullable<Text>, - access_code -> Text, - public_key -> Text, - enc_key -> Nullable<Text>, - master_password_hash -> Nullable<Text>, - approved -> Nullable<Bool>, - creation_date -> Timestamp, - response_date -> Nullable<Timestamp>, - authentication_date -> Nullable<Timestamp>, - } -} - -joinable!(attachments -> ciphers (cipher_uuid)); -joinable!(ciphers -> organizations (organization_uuid)); -joinable!(ciphers -> users (user_uuid)); -joinable!(ciphers_collections -> ciphers (cipher_uuid)); -joinable!(ciphers_collections -> collections (collection_uuid)); -joinable!(collections -> organizations (org_uuid)); -joinable!(devices -> users (user_uuid)); -joinable!(folders -> users (user_uuid)); -joinable!(folders_ciphers -> ciphers (cipher_uuid)); -joinable!(folders_ciphers -> folders (folder_uuid)); -joinable!(org_policies -> organizations (org_uuid)); -joinable!(sends -> organizations (organization_uuid)); -joinable!(sends -> users (user_uuid)); -joinable!(twofactor -> users (user_uuid)); -joinable!(users_collections -> collections (collection_uuid)); -joinable!(users_collections -> users (user_uuid)); -joinable!(users_organizations -> organizations (org_uuid)); -joinable!(users_organizations -> users (user_uuid)); -joinable!(users_organizations -> ciphers (org_uuid)); -joinable!(organization_api_key -> organizations (org_uuid)); -joinable!(emergency_access -> users (grantor_uuid)); -joinable!(groups -> organizations (organizations_uuid)); -joinable!(groups_users -> users_organizations (users_organizations_uuid)); -joinable!(groups_users -> groups (groups_uuid)); -joinable!(collections_groups -> collections (collections_uuid)); -joinable!(collections_groups -> groups (groups_uuid)); -joinable!(event -> users_organizations (uuid)); -joinable!(auth_requests -> users (user_uuid)); - -allow_tables_to_appear_in_same_query!( - attachments, - ciphers, - ciphers_collections, - collections, - devices, - folders, - folders_ciphers, - invitations, - org_policies, - organizations, - sends, - twofactor, - users, - users_collections, - users_organizations, - organization_api_key, - emergency_access, - groups, - groups_users, - collections_groups, - event, - auth_requests, -); diff --git a/src/db/schemas/postgresql/schema.rs b/src/db/schemas/postgresql/schema.rs @@ -1,363 +0,0 @@ -table! { - attachments (id) { - id -> Text, - cipher_uuid -> Text, - file_name -> Text, - file_size -> Integer, - akey -> Nullable<Text>, - } -} - -table! { - ciphers (uuid) { - uuid -> Text, - created_at -> Timestamp, - updated_at -> Timestamp, - user_uuid -> Nullable<Text>, - organization_uuid -> Nullable<Text>, - key -> Nullable<Text>, - atype -> Integer, - name -> Text, - notes -> Nullable<Text>, - fields -> Nullable<Text>, - data -> Text, - password_history -> Nullable<Text>, - deleted_at -> Nullable<Timestamp>, - reprompt -> Nullable<Integer>, - } -} - -table! { - ciphers_collections (cipher_uuid, collection_uuid) { - cipher_uuid -> Text, - collection_uuid -> Text, - } -} - -table! { - collections (uuid) { - uuid -> Text, - org_uuid -> Text, - name -> Text, - external_id -> Nullable<Text>, - } -} - -table! 
{ - devices (uuid, user_uuid) { - uuid -> Text, - created_at -> Timestamp, - updated_at -> Timestamp, - user_uuid -> Text, - name -> Text, - atype -> Integer, - push_uuid -> Nullable<Text>, - push_token -> Nullable<Text>, - refresh_token -> Text, - twofactor_remember -> Nullable<Text>, - } -} - -table! { - event (uuid) { - uuid -> Text, - event_type -> Integer, - user_uuid -> Nullable<Text>, - org_uuid -> Nullable<Text>, - cipher_uuid -> Nullable<Text>, - collection_uuid -> Nullable<Text>, - group_uuid -> Nullable<Text>, - org_user_uuid -> Nullable<Text>, - act_user_uuid -> Nullable<Text>, - device_type -> Nullable<Integer>, - ip_address -> Nullable<Text>, - event_date -> Timestamp, - policy_uuid -> Nullable<Text>, - provider_uuid -> Nullable<Text>, - provider_user_uuid -> Nullable<Text>, - provider_org_uuid -> Nullable<Text>, - } -} - -table! { - favorites (user_uuid, cipher_uuid) { - user_uuid -> Text, - cipher_uuid -> Text, - } -} - -table! { - folders (uuid) { - uuid -> Text, - created_at -> Timestamp, - updated_at -> Timestamp, - user_uuid -> Text, - name -> Text, - } -} - -table! { - folders_ciphers (cipher_uuid, folder_uuid) { - cipher_uuid -> Text, - folder_uuid -> Text, - } -} - -table! { - invitations (email) { - email -> Text, - } -} - -table! { - org_policies (uuid) { - uuid -> Text, - org_uuid -> Text, - atype -> Integer, - enabled -> Bool, - data -> Text, - } -} - -table! { - organizations (uuid) { - uuid -> Text, - name -> Text, - billing_email -> Text, - private_key -> Nullable<Text>, - public_key -> Nullable<Text>, - } -} - -table! { - sends (uuid) { - uuid -> Text, - user_uuid -> Nullable<Text>, - organization_uuid -> Nullable<Text>, - name -> Text, - notes -> Nullable<Text>, - atype -> Integer, - data -> Text, - akey -> Text, - password_hash -> Nullable<Binary>, - password_salt -> Nullable<Binary>, - password_iter -> Nullable<Integer>, - max_access_count -> Nullable<Integer>, - access_count -> Integer, - creation_date -> Timestamp, - revision_date -> Timestamp, - expiration_date -> Nullable<Timestamp>, - deletion_date -> Timestamp, - disabled -> Bool, - hide_email -> Nullable<Bool>, - } -} - -table! { - twofactor (uuid) { - uuid -> Text, - user_uuid -> Text, - atype -> Integer, - enabled -> Bool, - data -> Text, - last_used -> Integer, - } -} - -table! { - twofactor_incomplete (user_uuid, device_uuid) { - user_uuid -> Text, - device_uuid -> Text, - device_name -> Text, - login_time -> Timestamp, - ip_address -> Text, - } -} - -table! { - users (uuid) { - uuid -> Text, - enabled -> Bool, - created_at -> Timestamp, - updated_at -> Timestamp, - verified_at -> Nullable<Timestamp>, - last_verifying_at -> Nullable<Timestamp>, - login_verify_count -> Integer, - email -> Text, - email_new -> Nullable<Text>, - email_new_token -> Nullable<Text>, - name -> Text, - password_hash -> Binary, - salt -> Binary, - password_iterations -> Integer, - password_hint -> Nullable<Text>, - akey -> Text, - private_key -> Nullable<Text>, - public_key -> Nullable<Text>, - totp_secret -> Nullable<Text>, - totp_recover -> Nullable<Text>, - security_stamp -> Text, - stamp_exception -> Nullable<Text>, - equivalent_domains -> Text, - excluded_globals -> Text, - client_kdf_type -> Integer, - client_kdf_iter -> Integer, - client_kdf_memory -> Nullable<Integer>, - client_kdf_parallelism -> Nullable<Integer>, - api_key -> Nullable<Text>, - avatar_color -> Nullable<Text>, - external_id -> Nullable<Text>, - } -} - -table! 
{ - users_collections (user_uuid, collection_uuid) { - user_uuid -> Text, - collection_uuid -> Text, - read_only -> Bool, - hide_passwords -> Bool, - } -} - -table! { - users_organizations (uuid) { - uuid -> Text, - user_uuid -> Text, - org_uuid -> Text, - access_all -> Bool, - akey -> Text, - status -> Integer, - atype -> Integer, - reset_password_key -> Nullable<Text>, - external_id -> Nullable<Text>, - } -} - -table! { - organization_api_key (uuid, org_uuid) { - uuid -> Text, - org_uuid -> Text, - atype -> Integer, - api_key -> Text, - revision_date -> Timestamp, - } -} - -table! { - emergency_access (uuid) { - uuid -> Text, - grantor_uuid -> Text, - grantee_uuid -> Nullable<Text>, - email -> Nullable<Text>, - key_encrypted -> Nullable<Text>, - atype -> Integer, - status -> Integer, - wait_time_days -> Integer, - recovery_initiated_at -> Nullable<Timestamp>, - last_notification_at -> Nullable<Timestamp>, - updated_at -> Timestamp, - created_at -> Timestamp, - } -} - -table! { - groups (uuid) { - uuid -> Text, - organizations_uuid -> Text, - name -> Text, - access_all -> Bool, - external_id -> Nullable<Text>, - creation_date -> Timestamp, - revision_date -> Timestamp, - } -} - -table! { - groups_users (groups_uuid, users_organizations_uuid) { - groups_uuid -> Text, - users_organizations_uuid -> Text, - } -} - -table! { - collections_groups (collections_uuid, groups_uuid) { - collections_uuid -> Text, - groups_uuid -> Text, - read_only -> Bool, - hide_passwords -> Bool, - } -} - -table! { - auth_requests (uuid) { - uuid -> Text, - user_uuid -> Text, - organization_uuid -> Nullable<Text>, - request_device_identifier -> Text, - device_type -> Integer, - request_ip -> Text, - response_device_id -> Nullable<Text>, - access_code -> Text, - public_key -> Text, - enc_key -> Nullable<Text>, - master_password_hash -> Nullable<Text>, - approved -> Nullable<Bool>, - creation_date -> Timestamp, - response_date -> Nullable<Timestamp>, - authentication_date -> Nullable<Timestamp>, - } -} - -joinable!(attachments -> ciphers (cipher_uuid)); -joinable!(ciphers -> organizations (organization_uuid)); -joinable!(ciphers -> users (user_uuid)); -joinable!(ciphers_collections -> ciphers (cipher_uuid)); -joinable!(ciphers_collections -> collections (collection_uuid)); -joinable!(collections -> organizations (org_uuid)); -joinable!(devices -> users (user_uuid)); -joinable!(folders -> users (user_uuid)); -joinable!(folders_ciphers -> ciphers (cipher_uuid)); -joinable!(folders_ciphers -> folders (folder_uuid)); -joinable!(org_policies -> organizations (org_uuid)); -joinable!(sends -> organizations (organization_uuid)); -joinable!(sends -> users (user_uuid)); -joinable!(twofactor -> users (user_uuid)); -joinable!(users_collections -> collections (collection_uuid)); -joinable!(users_collections -> users (user_uuid)); -joinable!(users_organizations -> organizations (org_uuid)); -joinable!(users_organizations -> users (user_uuid)); -joinable!(users_organizations -> ciphers (org_uuid)); -joinable!(organization_api_key -> organizations (org_uuid)); -joinable!(emergency_access -> users (grantor_uuid)); -joinable!(groups -> organizations (organizations_uuid)); -joinable!(groups_users -> users_organizations (users_organizations_uuid)); -joinable!(groups_users -> groups (groups_uuid)); -joinable!(collections_groups -> collections (collections_uuid)); -joinable!(collections_groups -> groups (groups_uuid)); -joinable!(event -> users_organizations (uuid)); -joinable!(auth_requests -> users (user_uuid)); - 
-allow_tables_to_appear_in_same_query!( - attachments, - ciphers, - ciphers_collections, - collections, - devices, - folders, - folders_ciphers, - invitations, - org_policies, - organizations, - sends, - twofactor, - users, - users_collections, - users_organizations, - organization_api_key, - emergency_access, - groups, - groups_users, - collections_groups, - event, - auth_requests, -); diff --git a/src/error.rs b/src/error.rs @@ -36,7 +36,8 @@ macro_rules! make_error { } }; } - +#[cfg(not(all(feature = "priv_sep", target_os = "openbsd")))] +use core::convert::Infallible; use diesel::r2d2::PoolError as R2d2Err; use diesel::result::Error as DieselErr; use diesel::ConnectionError as DieselConErr; @@ -46,15 +47,15 @@ use lettre::address::AddressError as AddrErr; use lettre::error::Error as LettreErr; use lettre::transport::smtp::Error as SmtpErr; use openssl::error::ErrorStack as SSLErr; +#[cfg(all(feature = "priv_sep", target_os = "openbsd"))] +use priv_sep::UnveilErr; use regex::Error as RegexErr; -use reqwest::Error as ReqErr; use rocket::error::Error as RocketErr; use serde_json::{Error as SerdeErr, Value}; use std::io::Error as IoErr; use std::time::SystemTimeError as TimeErr; use tokio_tungstenite::tungstenite::Error as TungstError; use webauthn_rs::error::WebauthnError as WebauthnErr; -use yubico::yubicoerror::YubicoError as YubiErr; #[derive(Serialize)] pub struct Empty {} @@ -64,6 +65,7 @@ pub struct Empty {} // // After the variant itself, there are two expressions. The first one indicates whether the error contains a source error (that we pretty print). // The second one contains the function used to obtain the response sent to the client +#[cfg(not(all(feature = "priv_sep", target_os = "openbsd")))] make_error! { // Just an empty error Empty(Empty): _no_source, _serialize, @@ -79,9 +81,7 @@ make_error! { Io(IoErr): _has_source, _api_error, Time(TimeErr): _has_source, _api_error, - Req(ReqErr): _has_source, _api_error, Regex(RegexErr): _has_source, _api_error, - Yubico(YubiErr): _has_source, _api_error, Lettre(LettreErr): _has_source, _api_error, Address(AddrErr): _has_source, _api_error, @@ -93,7 +93,47 @@ make_error! { Webauthn(WebauthnErr): _has_source, _api_error, WebSocket(TungstError): _has_source, _api_error, } +// Error struct +// Contains a String error message, meant for the user and an enum variant, with an error of different types. +// +// After the variant itself, there are two expressions. The first one indicates whether the error contains a source error (that we pretty print). +// The second one contains the function used to obtain the response sent to the client +#[cfg(all(feature = "priv_sep", target_os = "openbsd"))] +make_error! { + // Just an empty error + Empty(Empty): _no_source, _serialize, + // Used to represent err! 
calls + Simple(String): _no_source, _api_error, + // Used for special return values, like 2FA errors + Json(Value): _no_source, _serialize, + Db(DieselErr): _has_source, _api_error, + R2d2(R2d2Err): _has_source, _api_error, + Serde(SerdeErr): _has_source, _api_error, + JWt(JwtErr): _has_source, _api_error, + Handlebars(HbErr): _has_source, _api_error, + + Io(IoErr): _has_source, _api_error, + Time(TimeErr): _has_source, _api_error, + Regex(RegexErr): _has_source, _api_error, + Lettre(LettreErr): _has_source, _api_error, + Address(AddrErr): _has_source, _api_error, + Smtp(SmtpErr): _has_source, _api_error, + OpenSSL(SSLErr): _has_source, _api_error, + Rocket(RocketErr): _has_source, _api_error, + Unveil(UnveilErr): _has_source, _api_error, + + DieselCon(DieselConErr): _has_source, _api_error, + Webauthn(WebauthnErr): _has_source, _api_error, + WebSocket(TungstError): _has_source, _api_error, +} +#[cfg(not(all(feature = "priv_sep", target_os = "openbsd")))] +impl From<Infallible> for Error { + #[inline] + fn from(value: Infallible) -> Self { + match value {} + } +} impl std::fmt::Debug for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.source() { @@ -211,12 +251,16 @@ impl<'r> Responder<'r, 'static> for Error { match self.error { ErrorKind::Empty(_) => {} // Don't print the error in this situation ErrorKind::Simple(_) => {} // Don't print the error in this situation - _ => error!(target: "error", "{:#?}", self), + _ => {} }; let code = Status::from_code(self.error_code).unwrap_or(Status::BadRequest); let body = self.to_string(); - Response::build().status(code).header(ContentType::JSON).sized_body(Some(body.len()), Cursor::new(body)).ok() + Response::build() + .status(code) + .header(ContentType::JSON) + .sized_body(Some(body.len()), Cursor::new(body)) + .ok() } } @@ -226,19 +270,15 @@ impl<'r> Responder<'r, 'static> for Error { #[macro_export] macro_rules! err { ($msg:expr) => {{ - error!("{}", $msg); return Err($crate::error::Error::new($msg, $msg)); }}; ($msg:expr, ErrorEvent $err_event:tt) => {{ - error!("{}", $msg); return Err($crate::error::Error::new($msg, $msg).with_event($crate::error::ErrorEvent $err_event)); }}; ($usr_msg:expr, $log_value:expr) => {{ - error!("{}. {}", $usr_msg, $log_value); return Err($crate::error::Error::new($usr_msg, $log_value)); }}; ($usr_msg:expr, $log_value:expr, ErrorEvent $err_event:tt) => {{ - error!("{}. {}", $usr_msg, $log_value); return Err($crate::error::Error::new($usr_msg, $log_value).with_event($crate::error::ErrorEvent $err_event)); }}; } @@ -256,11 +296,9 @@ macro_rules! err_silent { #[macro_export] macro_rules! err_code { ($msg:expr, $err_code:expr) => {{ - error!("{}", $msg); return Err($crate::error::Error::new($msg, $msg).with_code($err_code)); }}; ($usr_msg:expr, $log_value:expr, $err_code:expr) => {{ - error!("{}. {}", $usr_msg, $log_value); return Err($crate::error::Error::new($usr_msg, $log_value).with_code($err_code)); }}; } @@ -290,11 +328,9 @@ macro_rules! err_json { #[macro_export] macro_rules! err_handler { ($expr:expr) => {{ - error!(target: "auth", "Unauthorized Error: {}", $expr); return ::rocket::request::Outcome::Failure((rocket::http::Status::Unauthorized, $expr)); }}; ($usr_msg:expr, $log_value:expr) => {{ - error!(target: "auth", "Unauthorized Error: {}. 
{}", $usr_msg, $log_value); return ::rocket::request::Outcome::Failure((rocket::http::Status::Unauthorized, $usr_msg)); }}; } diff --git a/src/mail.rs b/src/mail.rs @@ -14,8 +14,8 @@ use lettre::{ use crate::{ api::EmptyResult, auth::{ - encode_jwt, generate_delete_claims, generate_emergency_access_invite_claims, generate_invite_claims, - generate_verify_email_claims, + encode_jwt, generate_delete_claims, generate_emergency_access_invite_claims, + generate_invite_claims, generate_verify_email_claims, }, error::Error, CONFIG, @@ -69,12 +69,18 @@ fn smtp_transport() -> AsyncSmtpTransport<Tokio1Executor> { let smtp_client = match CONFIG.smtp_auth_mechanism() { Some(mechanism) => { - let allowed_mechanisms = [SmtpAuthMechanism::Plain, SmtpAuthMechanism::Login, SmtpAuthMechanism::Xoauth2]; + let allowed_mechanisms = [ + SmtpAuthMechanism::Plain, + SmtpAuthMechanism::Login, + SmtpAuthMechanism::Xoauth2, + ]; let mut selected_mechanisms = vec![]; for wanted_mechanism in mechanism.split(',') { for m in &allowed_mechanisms { if m.to_string().to_lowercase() - == wanted_mechanism.trim_matches(|c| c == '"' || c == '\'' || c == ' ').to_lowercase() + == wanted_mechanism + .trim_matches(|c| c == '"' || c == '\'' || c == ' ') + .to_lowercase() { selected_mechanisms.push(*m); } @@ -85,7 +91,10 @@ fn smtp_transport() -> AsyncSmtpTransport<Tokio1Executor> { smtp_client.authentication(selected_mechanisms) } else { // Only show a warning, and return without setting an actual authentication mechanism - warn!("No valid SMTP Auth mechanism found for '{}', using default values", mechanism); + warn!( + "No valid SMTP Auth mechanism found for '{}', using default values", + mechanism + ); smtp_client } } @@ -95,7 +104,10 @@ fn smtp_transport() -> AsyncSmtpTransport<Tokio1Executor> { smtp_client.build() } -fn get_text(template_name: &'static str, data: serde_json::Value) -> Result<(String, String, String), Error> { +fn get_text( + template_name: &'static str, + data: serde_json::Value, +) -> Result<(String, String, String), Error> { let (subject_html, body_html) = get_template(&format!("{template_name}.html"), &data)?; let (_subject_text, body_text) = get_template(template_name, &data)?; Ok((subject_html, body_html, body_text)) @@ -294,7 +306,10 @@ pub async fn send_emergency_access_invite( send_email(address, &subject, body_html, body_text).await } -pub async fn send_emergency_access_invite_accepted(address: &str, grantee_email: &str) -> EmptyResult { +pub async fn send_emergency_access_invite_accepted( + address: &str, + grantee_email: &str, +) -> EmptyResult { let (subject, body_html, body_text) = get_text( "email/emergency_access_invite_accepted", json!({ @@ -307,7 +322,10 @@ pub async fn send_emergency_access_invite_accepted(address: &str, grantee_email: send_email(address, &subject, body_html, body_text).await } -pub async fn send_emergency_access_invite_confirmed(address: &str, grantor_name: &str) -> EmptyResult { +pub async fn send_emergency_access_invite_confirmed( + address: &str, + grantor_name: &str, +) -> EmptyResult { let (subject, body_html, body_text) = get_text( "email/emergency_access_invite_confirmed", json!({ @@ -320,7 +338,10 @@ pub async fn send_emergency_access_invite_confirmed(address: &str, grantor_name: send_email(address, &subject, body_html, body_text).await } -pub async fn send_emergency_access_recovery_approved(address: &str, grantor_name: &str) -> EmptyResult { +pub async fn send_emergency_access_recovery_approved( + address: &str, + grantor_name: &str, +) -> EmptyResult { let (subject, 
body_html, body_text) = get_text( "email/emergency_access_recovery_approved", json!({ @@ -353,28 +374,11 @@ pub async fn send_emergency_access_recovery_initiated( send_email(address, &subject, body_html, body_text).await } -pub async fn send_emergency_access_recovery_reminder( +pub async fn send_emergency_access_recovery_rejected( address: &str, - grantee_name: &str, - atype: &str, - days_left: &str, + grantor_name: &str, ) -> EmptyResult { let (subject, body_html, body_text) = get_text( - "email/emergency_access_recovery_reminder", - json!({ - "url": CONFIG.domain(), - "img_src": CONFIG._smtp_img_src(), - "grantee_name": grantee_name, - "atype": atype, - "days_left": days_left, - }), - )?; - - send_email(address, &subject, body_html, body_text).await -} - -pub async fn send_emergency_access_recovery_rejected(address: &str, grantor_name: &str) -> EmptyResult { - let (subject, body_html, body_text) = get_text( "email/emergency_access_recovery_rejected", json!({ "url": CONFIG.domain(), @@ -386,21 +390,11 @@ pub async fn send_emergency_access_recovery_rejected(address: &str, grantor_name send_email(address, &subject, body_html, body_text).await } -pub async fn send_emergency_access_recovery_timed_out(address: &str, grantee_name: &str, atype: &str) -> EmptyResult { - let (subject, body_html, body_text) = get_text( - "email/emergency_access_recovery_timed_out", - json!({ - "url": CONFIG.domain(), - "img_src": CONFIG._smtp_img_src(), - "grantee_name": grantee_name, - "atype": atype, - }), - )?; - - send_email(address, &subject, body_html, body_text).await -} - -pub async fn send_invite_accepted(new_user_email: &str, address: &str, org_name: &str) -> EmptyResult { +pub async fn send_invite_accepted( + new_user_email: &str, + address: &str, + org_name: &str, +) -> EmptyResult { let (subject, body_html, body_text) = get_text( "email/invite_accepted", json!({ @@ -427,7 +421,12 @@ pub async fn send_invite_confirmed(address: &str, org_name: &str) -> EmptyResult send_email(address, &subject, body_html, body_text).await } -pub async fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult { +pub async fn send_new_device_logged_in( + address: &str, + ip: &str, + dt: &NaiveDateTime, + device: &str, +) -> EmptyResult { use crate::util::upcase_first; let device = upcase_first(device); @@ -445,40 +444,6 @@ pub async fn send_new_device_logged_in(address: &str, ip: &str, dt: &NaiveDateTi send_email(address, &subject, body_html, body_text).await } - -pub async fn send_incomplete_2fa_login(address: &str, ip: &str, dt: &NaiveDateTime, device: &str) -> EmptyResult { - use crate::util::upcase_first; - let device = upcase_first(device); - - let fmt = "%A, %B %_d, %Y at %r %Z"; - let (subject, body_html, body_text) = get_text( - "email/incomplete_2fa_login", - json!({ - "url": CONFIG.domain(), - "img_src": CONFIG._smtp_img_src(), - "ip": ip, - "device": device, - "datetime": crate::util::format_naive_datetime_local(dt, fmt), - "time_limit": CONFIG.incomplete_2fa_time_limit(), - }), - )?; - - send_email(address, &subject, body_html, body_text).await -} - -pub async fn send_token(address: &str, token: &str) -> EmptyResult { - let (subject, body_html, body_text) = get_text( - "email/twofactor_email", - json!({ - "url": CONFIG.domain(), - "img_src": CONFIG._smtp_img_src(), - "token": token, - }), - )?; - - send_email(address, &subject, body_html, body_text).await -} - pub async fn send_change_email(address: &str, token: &str) -> EmptyResult { let (subject, body_html, 
body_text) = get_text( "email/change_email", @@ -492,19 +457,11 @@ pub async fn send_change_email(address: &str, token: &str) -> EmptyResult { send_email(address, &subject, body_html, body_text).await } -pub async fn send_test(address: &str) -> EmptyResult { - let (subject, body_html, body_text) = get_text( - "email/smtp_test", - json!({ - "url": CONFIG.domain(), - "img_src": CONFIG._smtp_img_src(), - }), - )?; - - send_email(address, &subject, body_html, body_text).await -} - -pub async fn send_admin_reset_password(address: &str, user_name: &str, org_name: &str) -> EmptyResult { +pub async fn send_admin_reset_password( + address: &str, + user_name: &str, + org_name: &str, +) -> EmptyResult { let (subject, body_html, body_text) = get_text( "email/admin_reset_password", json!({ @@ -569,32 +526,56 @@ async fn send_with_selected_transport(email: Message) -> EmptyResult { } } -async fn send_email(address: &str, subject: &str, body_html: String, body_text: String) -> EmptyResult { +async fn send_email( + address: &str, + subject: &str, + body_html: String, + body_text: String, +) -> EmptyResult { let smtp_from = &CONFIG.smtp_from(); let body = if CONFIG.smtp_embed_images() { - let logo_gray_body = Body::new(crate::api::static_files("logo-gray.png").unwrap().1.to_vec()); - let mail_github_body = Body::new(crate::api::static_files("mail-github.png").unwrap().1.to_vec()); - MultiPart::alternative().singlepart(SinglePart::plain(body_text)).multipart( - MultiPart::related() - .singlepart(SinglePart::html(body_html)) - .singlepart( - Attachment::new_inline(String::from("logo-gray.png")) - .body(logo_gray_body, "image/png".parse().unwrap()), - ) - .singlepart( - Attachment::new_inline(String::from("mail-github.png")) - .body(mail_github_body, "image/png".parse().unwrap()), - ), - ) + let logo_gray_body = Body::new( + crate::api::static_files("logo-gray.png") + .unwrap() + .1 + .to_vec(), + ); + let mail_github_body = Body::new( + crate::api::static_files("mail-github.png") + .unwrap() + .1 + .to_vec(), + ); + MultiPart::alternative() + .singlepart(SinglePart::plain(body_text)) + .multipart( + MultiPart::related() + .singlepart(SinglePart::html(body_html)) + .singlepart( + Attachment::new_inline(String::from("logo-gray.png")) + .body(logo_gray_body, "image/png".parse().unwrap()), + ) + .singlepart( + Attachment::new_inline(String::from("mail-github.png")) + .body(mail_github_body, "image/png".parse().unwrap()), + ), + ) } else { MultiPart::alternative_plain_html(body_text, body_html) }; let email = Message::builder() - .message_id(Some(format!("<{}@{}>", crate::util::get_uuid(), smtp_from.split('@').collect::<Vec<&str>>()[1]))) + .message_id(Some(format!( + "<{}@{}>", + crate::util::get_uuid(), + smtp_from.split('@').collect::<Vec<&str>>()[1] + ))) .to(Mailbox::new(None, Address::from_str(address)?)) - .from(Mailbox::new(Some(CONFIG.smtp_from_name()), Address::from_str(smtp_from)?)) + .from(Mailbox::new( + Some(CONFIG.smtp_from_name()), + Address::from_str(smtp_from)?, + )) .subject(subject) .multipart(body)?; diff --git a/src/main.rs b/src/main.rs @@ -29,19 +29,12 @@ clippy::verbose_file_reads, clippy::zero_sized_map_values )] -#![cfg_attr(feature = "unstable", feature(ip))] // The recursion_limit is mainly triggered by the json!() macro. // The more key/value pairs there are the more recursion occurs. // We want to keep this as low as possible, but not higher then 128. 
// If you go above 128 it will cause rust-analyzer to fail, #![recursion_limit = "103"] - -// When enabled use MiMalloc as malloc instead of the default malloc -#[cfg(feature = "enable_mimalloc")] -use mimalloc::MiMalloc; -#[cfg(feature = "enable_mimalloc")] -#[cfg_attr(feature = "enable_mimalloc", global_allocator)] -static GLOBAL: MiMalloc = MiMalloc; +use priv_sep::unveil_create_read_write; #[macro_use] extern crate rocket; @@ -50,26 +43,15 @@ extern crate serde; #[macro_use] extern crate serde_json; #[macro_use] -extern crate log; -#[macro_use] extern crate diesel; #[macro_use] extern crate diesel_migrations; use std::{ fs::{canonicalize, create_dir_all}, - panic, path::Path, process::exit, - str::FromStr, - thread, -}; - -use tokio::{ - fs::File, - io::{AsyncBufReadExt, BufReader}, }; - #[macro_use] mod error; mod api; @@ -79,150 +61,100 @@ mod crypto; #[macro_use] mod db; mod mail; +mod priv_sep; mod ratelimit; mod util; - -use crate::api::purge_auth_requests; -use crate::api::WS_ANONYMOUS_SUBSCRIPTIONS; pub use config::CONFIG; pub use error::{Error, MapResult}; use rocket::data::{Limits, ToByteUnit}; +use std::env; +use std::path::PathBuf; use std::sync::Arc; -pub use util::is_running_in_docker; +use tokio::runtime::Builder; -#[rocket::main] -async fn main() -> Result<(), Error> { - parse_args(); +fn main() -> Result<(), Error> { + let mut promises = priv_sep::pledge_init()?; launch_info(); - - use log::LevelFilter as LF; - let level = LF::from_str(&CONFIG.log_level()).expect("Valid log level"); - init_logging(level).ok(); - - let extra_debug = matches!(level, LF::Trace | LF::Debug); - - check_data_folder().await; - check_rsa_keys().unwrap_or_else(|_| { - error!("Error creating keys, exiting..."); - exit(1); - }); + let cur_dir = env::current_dir()?; + priv_sep::unveil_read(cur_dir.as_path())?; + static_init(); + validate_config(cur_dir)?; + check_data_folder(); + check_rsa_keys().expect("error creating keys"); check_web_vault(); - create_dir(&CONFIG.icon_cache_folder(), "icon cache"); create_dir(&CONFIG.tmp_folder(), "tmp folder"); create_dir(&CONFIG.sends_folder(), "sends folder"); create_dir(&CONFIG.attachments_folder(), "attachments folder"); - - let pool = create_db_pool().await; - schedule_jobs(pool.clone()); - crate::db::models::TwoFactor::migrate_u2f_to_webauthn(&mut pool.get().await.unwrap()).await.unwrap(); - - launch_rocket(pool, extra_debug).await // Blocks until program termination. 
+ Builder::new_multi_thread() + .enable_all() + .build() + .map_or_else( + |e| Err(Error::from(e)), + |runtime| { + runtime.block_on(async { + let config = rocket::Config::from(rocket::Config::figment()); + config.tls.as_ref().map_or(Ok(()), |tls| { + tls.certs().left().map_or(Ok(()), |certs| { + priv_sep::unveil_read(certs).and_then(|()| { + tls.key().left().map_or(Ok(()), priv_sep::unveil_read) + }) + }) + })?; + priv_sep::pledge_away_unveil(&mut promises)?; + launch_rocket(create_db_pool().await, config).await + }) + }, + ) } - -const HELP: &str = "\ -Alternative implementation of the Bitwarden server API written in Rust - -USAGE: - vaultwarden [FLAGS|COMMAND] - -FLAGS: - -h, --help Prints help information - -v, --version Prints the app version - -COMMAND: - hash [--preset {bitwarden|owasp}] Generate an Argon2id PHC ADMIN_TOKEN - -PRESETS: m= t= p= - bitwarden (default) 64MiB, 3 Iterations, 4 Threads - owasp 19MiB, 2 Iterations, 1 Thread - -"; - -pub const VERSION: Option<&str> = option_env!("VW_VERSION"); - -fn parse_args() { - let mut pargs = pico_args::Arguments::from_env(); - let version = VERSION.unwrap_or("(Version info from Git not present)"); - - if pargs.contains(["-h", "--help"]) { - println!("vaultwarden {version}"); - print!("{HELP}"); - exit(0); - } else if pargs.contains(["-v", "--version"]) { - println!("vaultwarden {version}"); - exit(0); - } - - if let Some(command) = pargs.subcommand().unwrap_or_default() { - if command == "hash" { - use argon2::{ - password_hash::SaltString, Algorithm::Argon2id, Argon2, ParamsBuilder, PasswordHasher, Version::V0x13, - }; - - let mut argon2_params = ParamsBuilder::new(); - let preset: Option<String> = pargs.opt_value_from_str(["-p", "--preset"]).unwrap_or_default(); - let selected_preset; - match preset.as_deref() { - Some("owasp") => { - selected_preset = "owasp"; - argon2_params.m_cost(19456); - argon2_params.t_cost(2); - argon2_params.p_cost(1); - } - _ => { - // Bitwarden preset is the default - selected_preset = "bitwarden"; - argon2_params.m_cost(65540); - argon2_params.t_cost(3); - argon2_params.p_cost(4); - } - } - - println!("Generate an Argon2id PHC string using the '{selected_preset}' preset:\n"); - - let password = rpassword::prompt_password("Password: ").unwrap(); - if password.len() < 8 { - println!("\nPassword must contain at least 8 characters"); - exit(1); - } - - let password_verify = rpassword::prompt_password("Confirm Password: ").unwrap(); - if password != password_verify { - println!("\nPasswords do not match"); - exit(1); - } - - let argon2 = Argon2::new(Argon2id, V0x13, argon2_params.build().unwrap()); - let salt = SaltString::encode_b64(&crate::crypto::get_random_bytes::<32>()).unwrap(); - - let argon2_timer = tokio::time::Instant::now(); - if let Ok(password_hash) = argon2.hash_password(password.as_bytes(), &salt) { - println!( - "\n\ - ADMIN_TOKEN='{password_hash}'\n\n\ - Generation of the Argon2id PHC string took: {:?}", - argon2_timer.elapsed() - ); - } else { - error!("Unable to generate Argon2id PHC hash."); - exit(1); - } - } - exit(0); +#[inline] +fn static_init() { + api::init_ws_users(); + api::init_ws_anonymous_subscriptions(); + ratelimit::init_limiter(); +} +#[inline] +fn validate_config(mut path: PathBuf) -> Result<(), Error> { + if CONFIG.job_poll_interval_ms() > 0 { + err!("'JOB_POLL_INTERVAL_MS=0' must be set in the config file") + } else if CONFIG.extended_logging() { + err!("'EXTENDED_LOGGING=false' must be set in the config file") + } else if CONFIG._enable_yubico() { + 
err!("'_ENABLE_YUBICO=false' must be set in the config file") + } else if CONFIG._enable_duo() { + err!("'_ENABLE_DUO=false' must be set in the config file") + } else if CONFIG._enable_email_2fa() { + err!("'_ENABLE_EMAIL_2FA=false' must be set in the config file") + } else if CONFIG.is_admin_token_set() { + err!("'ADMIN_TOKEN' must not exit in the config file or be empty") + } else if !CONFIG.disable_icon_download() { + err!("'DISABLE_ICON_DOWNLOAD=true' must be set in the config file") + } else if CONFIG.org_events_enabled() { + err!("'ORG_EVENTS_ENABLED=false' must be set in the config file") + } else if CONFIG.org_groups_enabled() { + err!("'ORG_GROUPS_ENABLED=false' must be set in the config file") + } else if !CONFIG.log_level().eq_ignore_ascii_case("OFF") { + err!("'LOG_LEVEL=OFF' must be set in the config file") + } else if CONFIG.use_syslog() { + err!("'USE_SYSLOG=false' must be set in the config file") + } else if CONFIG.log_file().is_some() { + err!("'LOG_FILE' must not exist in the config file") + } else if !CONFIG.disable_2fa_remember() { + err!("'DISABLE_2FA_REMEMBER=false' must be set in the config file") + } else { + path.push(CONFIG.data_folder()); + unveil_create_read_write(path).map_err(Error::from) } } +pub const VERSION: &str = env!("CARGO_PKG_VERSION"); + fn launch_info() { println!( "\ /--------------------------------------------------------------------\\\n\ | Starting Vaultwarden |" ); - - if let Some(version) = VERSION { - println!("|{:^68}|", format!("Version {version}")); - } - + println!("|{:^68}|", format!("Version {VERSION}")); println!( "\ |--------------------------------------------------------------------|\n\ @@ -237,244 +169,22 @@ fn launch_info() { ); } -fn init_logging(level: log::LevelFilter) -> Result<(), fern::InitError> { - // Depending on the main log level we either want to disable or enable logging for trust-dns. - // Else if there are timeouts it will clutter the logs since trust-dns uses warn for this. - let trust_dns_level = if level >= log::LevelFilter::Debug { - level - } else { - log::LevelFilter::Off - }; - - let diesel_logger_level: log::LevelFilter = - if cfg!(feature = "query_logger") && std::env::var("QUERY_LOGGER").is_ok() { - log::LevelFilter::Debug - } else { - log::LevelFilter::Off - }; - - // Only show Rocket underscore `_` logs when the level is Debug or higher - // Else this will bloat the log output with useless messages. 
- let rocket_underscore_level = if level >= log::LevelFilter::Debug { - log::LevelFilter::Warn - } else { - log::LevelFilter::Off - }; - - // Only show handlebar logs when the level is Trace - let handlebars_level = if level >= log::LevelFilter::Trace { - log::LevelFilter::Trace - } else { - log::LevelFilter::Warn - }; - - let mut logger = fern::Dispatch::new() - .level(level) - // Hide unknown certificate errors if using self-signed - .level_for("rustls::session", log::LevelFilter::Off) - // Hide failed to close stream messages - .level_for("hyper::server", log::LevelFilter::Warn) - // Silence Rocket `_` logs - .level_for("_", rocket_underscore_level) - .level_for("rocket::response::responder::_", rocket_underscore_level) - .level_for("rocket::server::_", rocket_underscore_level) - .level_for("vaultwarden::api::admin::_", rocket_underscore_level) - .level_for("vaultwarden::api::notifications::_", rocket_underscore_level) - // Silence Rocket logs - .level_for("rocket::launch", log::LevelFilter::Error) - .level_for("rocket::launch_", log::LevelFilter::Error) - .level_for("rocket::rocket", log::LevelFilter::Warn) - .level_for("rocket::server", log::LevelFilter::Warn) - .level_for("rocket::fairing::fairings", log::LevelFilter::Warn) - .level_for("rocket::shield::shield", log::LevelFilter::Warn) - .level_for("hyper::proto", log::LevelFilter::Off) - .level_for("hyper::client", log::LevelFilter::Off) - // Filter handlebars logs - .level_for("handlebars::render", handlebars_level) - // Prevent cookie_store logs - .level_for("cookie_store", log::LevelFilter::Off) - // Variable level for trust-dns used by reqwest - .level_for("trust_dns_resolver::name_server::name_server", trust_dns_level) - .level_for("trust_dns_proto::xfer", trust_dns_level) - .level_for("diesel_logger", diesel_logger_level) - .chain(std::io::stdout()); - - // Enable smtp debug logging only specifically for smtp when need. - // This can contain sensitive information we do not want in the default debug/trace logging. - if CONFIG.smtp_debug() { - println!( - "[WARNING] SMTP Debugging is enabled (SMTP_DEBUG=true). 
Sensitive information could be disclosed via logs!\n\ - [WARNING] Only enable SMTP_DEBUG during troubleshooting!\n" - ); - logger = logger.level_for("lettre::transport::smtp", log::LevelFilter::Debug) - } else { - logger = logger.level_for("lettre::transport::smtp", log::LevelFilter::Off) - } - - if CONFIG.extended_logging() { - logger = logger.format(|out, message, record| { - out.finish(format_args!( - "[{}][{}][{}] {}", - chrono::Local::now().format(&CONFIG.log_timestamp_format()), - record.target(), - record.level(), - message - )) - }); - } else { - logger = logger.format(|out, message, _| out.finish(format_args!("{message}"))); - } - - if let Some(log_file) = CONFIG.log_file() { - #[cfg(windows)] - { - logger = logger.chain(fern::log_file(log_file)?); - } - #[cfg(not(windows))] - { - const SIGHUP: i32 = tokio::signal::unix::SignalKind::hangup().as_raw_value(); - let path = Path::new(&log_file); - logger = logger.chain(fern::log_reopen1(path, [SIGHUP])?); - } - } - - #[cfg(not(windows))] - { - if cfg!(feature = "enable_syslog") || CONFIG.use_syslog() { - logger = chain_syslog(logger); - } - } - - logger.apply()?; - - // Catch panics and log them instead of default output to StdErr - panic::set_hook(Box::new(|info| { - let thread = thread::current(); - let thread = thread.name().unwrap_or("unnamed"); - - let msg = match info.payload().downcast_ref::<&'static str>() { - Some(s) => *s, - None => match info.payload().downcast_ref::<String>() { - Some(s) => &**s, - None => "Box<Any>", - }, - }; - - let backtrace = std::backtrace::Backtrace::force_capture(); - - match info.location() { - Some(location) => { - error!( - target: "panic", "thread '{}' panicked at '{}': {}:{}\n{:}", - thread, - msg, - location.file(), - location.line(), - backtrace - ); - } - None => error!( - target: "panic", - "thread '{}' panicked at '{}'\n{:}", - thread, - msg, - backtrace - ), - } - })); - - Ok(()) -} - -#[cfg(not(windows))] -fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch { - let syslog_fmt = syslog::Formatter3164 { - facility: syslog::Facility::LOG_USER, - hostname: None, - process: "vaultwarden".into(), - pid: 0, - }; - - match syslog::unix(syslog_fmt) { - Ok(sl) => logger.chain(sl), - Err(e) => { - error!("Unable to connect to syslog: {:?}", e); - logger - } - } -} - fn create_dir(path: &str, description: &str) { // Try to create the specified dir, if it doesn't already exist. let err_msg = format!("Error creating {description} directory '{path}'"); create_dir_all(path).expect(&err_msg); } -async fn check_data_folder() { +fn check_data_folder() { let data_folder = &CONFIG.data_folder(); let path = Path::new(data_folder); if !path.exists() { - error!("Data folder '{}' doesn't exist.", data_folder); - if is_running_in_docker() { - error!("Verify that your data volume is mounted at the correct location."); - } else { - error!("Create the data folder and try again."); - } exit(1); } if !path.is_dir() { - error!("Data folder '{}' is not a directory.", data_folder); - exit(1); - } - - if is_running_in_docker() - && std::env::var("I_REALLY_WANT_VOLATILE_STORAGE").is_err() - && !docker_data_folder_is_persistent(data_folder).await - { - error!( - "No persistent volume!\n\ - ########################################################################################\n\ - # It looks like you did not configure a persistent volume! #\n\ - # This will result in permanent data loss when the container is removed or updated! 
#\n\ - # If you really want to use volatile storage set `I_REALLY_WANT_VOLATILE_STORAGE=true` #\n\ - ########################################################################################\n" - ); exit(1); } } - -/// Detect when using Docker or Podman the DATA_FOLDER is either a bind-mount or a volume created manually. -/// If not created manually, then the data will not be persistent. -/// A none persistent volume in either Docker or Podman is represented by a 64 alphanumerical string. -/// If we detect this string, we will alert about not having a persistent self defined volume. -/// This probably means that someone forgot to add `-v /path/to/vaultwarden_data/:/data` -async fn docker_data_folder_is_persistent(data_folder: &str) -> bool { - if let Ok(mountinfo) = File::open("/proc/self/mountinfo").await { - // Since there can only be one mountpoint to the DATA_FOLDER - // We do a basic check for this mountpoint surrounded by a space. - let data_folder_match = if data_folder.starts_with('/') { - format!(" {data_folder} ") - } else { - format!(" /{data_folder} ") - }; - let mut lines = BufReader::new(mountinfo).lines(); - while let Some(line) = lines.next_line().await.unwrap_or_default() { - // Only execute a regex check if we find the base match - if line.contains(&data_folder_match) { - let re = regex::Regex::new(r"/volumes/[a-z0-9]{64}/_data /").unwrap(); - if re.is_match(&line) { - return false; - } - // If we did found a match for the mountpoint, but not the regex, then still stop searching. - break; - } - } - } - // In all other cases, just assume a true. - // This is just an informative check to try and prevent data loss. - true -} - fn check_rsa_keys() -> Result<(), crate::error::Error> { // If the RSA keys don't exist, try to create them let priv_path = CONFIG.private_rsa_key(); @@ -485,7 +195,6 @@ fn check_rsa_keys() -> Result<(), crate::error::Error> { let priv_key = rsa_key.private_key_to_pem()?; crate::util::write_file(&priv_path, &priv_key)?; - info!("Private key created correctly."); } if !util::file_exists(&pub_path) { @@ -493,7 +202,6 @@ fn check_rsa_keys() -> Result<(), crate::error::Error> { let pub_key = rsa_key.public_key_to_pem()?; crate::util::write_file(&pub_path, &pub_key)?; - info!("Public key created correctly."); } auth::load_keys(); @@ -508,12 +216,6 @@ fn check_web_vault() { let index_path = Path::new(&CONFIG.web_vault_folder()).join("index.html"); if !index_path.exists() { - error!( - "Web vault is not found at '{}'. To install it, please follow the steps in: ", - CONFIG.web_vault_folder() - ); - error!("https://github.com/dani-garcia/vaultwarden/wiki/Building-binary#install-the-web-vault"); - error!("You can also set the environment variable 'WEB_VAULT_ENABLED=false' to disable it"); exit(1); } } @@ -521,17 +223,14 @@ fn check_web_vault() { async fn create_db_pool() -> db::DbPool { match util::retry_db(db::DbPool::from_config, CONFIG.db_connection_retries()).await { Ok(p) => p, - Err(e) => { - error!("Error creating database pool: {:?}", e); + Err(_) => { exit(1); } } } -async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error> { +async fn launch_rocket(pool: db::DbPool, mut config: rocket::Config) -> Result<(), Error> { let basepath = &CONFIG.domain_path(); - - let mut config = rocket::Config::from(rocket::Config::figment()); config.temp_dir = canonicalize(CONFIG.tmp_folder()).unwrap().into(); config.cli_colors = false; // Make sure Rocket does not color any values for logging. 
config.limits = Limits::new() @@ -548,117 +247,27 @@ async fn launch_rocket(pool: db::DbPool, extra_debug: bool) -> Result<(), Error> .mount([basepath, "/events"].concat(), api::core_events_routes()) .mount([basepath, "/identity"].concat(), api::identity_routes()) .mount([basepath, "/icons"].concat(), api::icons_routes()) - .mount([basepath, "/notifications"].concat(), api::notifications_routes()) + .mount( + [basepath, "/notifications"].concat(), + api::notifications_routes(), + ) .register([basepath, "/"].concat(), api::web_catchers()) .register([basepath, "/api"].concat(), api::core_catchers()) .register([basepath, "/admin"].concat(), api::admin_catchers()) .manage(pool) .manage(api::start_notification_server()) - .manage(Arc::clone(&WS_ANONYMOUS_SUBSCRIPTIONS)) + .manage(Arc::clone(api::ws_anonymous_subscriptions())) .attach(util::AppHeaders()) .attach(util::Cors()) - .attach(util::BetterLogging(extra_debug)) .ignite() .await?; - CONFIG.set_rocket_shutdown_handle(instance.shutdown()); - tokio::spawn(async move { - tokio::signal::ctrl_c().await.expect("Error setting Ctrl-C handler"); - info!("Exiting vaultwarden!"); + tokio::signal::ctrl_c() + .await + .expect("Error setting Ctrl-C handler"); CONFIG.shutdown(); }); - let _ = instance.launch().await?; - - info!("Vaultwarden process exited!"); Ok(()) } - -fn schedule_jobs(pool: db::DbPool) { - if CONFIG.job_poll_interval_ms() == 0 { - info!("Job scheduler disabled."); - return; - } - - let runtime = tokio::runtime::Runtime::new().unwrap(); - - thread::Builder::new() - .name("job-scheduler".to_string()) - .spawn(move || { - use job_scheduler_ng::{Job, JobScheduler}; - let _runtime_guard = runtime.enter(); - - let mut sched = JobScheduler::new(); - - // Purge sends that are past their deletion date. - if !CONFIG.send_purge_schedule().is_empty() { - sched.add(Job::new(CONFIG.send_purge_schedule().parse().unwrap(), || { - runtime.spawn(api::purge_sends(pool.clone())); - })); - } - - // Purge trashed items that are old enough to be auto-deleted. - if !CONFIG.trash_purge_schedule().is_empty() { - sched.add(Job::new(CONFIG.trash_purge_schedule().parse().unwrap(), || { - runtime.spawn(api::purge_trashed_ciphers(pool.clone())); - })); - } - - // Send email notifications about incomplete 2FA logins, which potentially - // indicates that a user's master password has been compromised. - if !CONFIG.incomplete_2fa_schedule().is_empty() { - sched.add(Job::new(CONFIG.incomplete_2fa_schedule().parse().unwrap(), || { - runtime.spawn(api::send_incomplete_2fa_notifications(pool.clone())); - })); - } - - // Grant emergency access requests that have met the required wait time. - // This job should run before the emergency access reminders job to avoid - // sending reminders for requests that are about to be granted anyway. - if !CONFIG.emergency_request_timeout_schedule().is_empty() { - sched.add(Job::new(CONFIG.emergency_request_timeout_schedule().parse().unwrap(), || { - runtime.spawn(api::emergency_request_timeout_job(pool.clone())); - })); - } - - // Send reminders to emergency access grantors that there are pending - // emergency access requests. 
- if !CONFIG.emergency_notification_reminder_schedule().is_empty() { - sched.add(Job::new(CONFIG.emergency_notification_reminder_schedule().parse().unwrap(), || { - runtime.spawn(api::emergency_notification_reminder_job(pool.clone())); - })); - } - - if !CONFIG.auth_request_purge_schedule().is_empty() { - sched.add(Job::new(CONFIG.auth_request_purge_schedule().parse().unwrap(), || { - runtime.spawn(purge_auth_requests(pool.clone())); - })); - } - - // Cleanup the event table of records x days old. - if CONFIG.org_events_enabled() - && !CONFIG.event_cleanup_schedule().is_empty() - && CONFIG.events_days_retain().is_some() - { - sched.add(Job::new(CONFIG.event_cleanup_schedule().parse().unwrap(), || { - runtime.spawn(api::event_cleanup_job(pool.clone())); - })); - } - - // Periodically check for jobs to run. We probably won't need any - // jobs that run more often than once a minute, so a default poll - // interval of 30 seconds should be sufficient. Users who want to - // schedule jobs to run more frequently for some reason can reduce - // the poll interval accordingly. - // - // Note that the scheduler checks jobs in the order in which they - // were added, so if two jobs are both eligible to run at a given - // tick, the one that was added earlier will run first. - loop { - sched.tick(); - runtime.block_on(tokio::time::sleep(tokio::time::Duration::from_millis(CONFIG.job_poll_interval_ms()))); - } - }) - .expect("Error spawning job scheduler thread"); -} diff --git a/src/priv_sep.rs b/src/priv_sep.rs @@ -0,0 +1,85 @@ +#![allow(clippy::implicit_return, clippy::pub_use)] +#[cfg(not(all(feature = "priv_sep", target_os = "openbsd")))] +use core::convert::Infallible; +#[cfg(all(feature = "priv_sep", target_os = "openbsd"))] +pub use priv_sep::UnveilErr; +#[cfg(all(feature = "priv_sep", target_os = "openbsd"))] +use priv_sep::{Permissions, Promise, Promises}; +#[cfg(all(feature = "priv_sep", target_os = "openbsd"))] +use std::io::Error; +use std::path::Path; +/// Used instead of `()` for the parameter +/// in the `pledge` functions. This allows +/// one to avoid having to disable certain lints. +#[cfg(not(all(feature = "priv_sep", target_os = "openbsd")))] +#[derive(Clone, Copy)] +pub struct Zst; +/// Calls `pledge` with only the system calls necessary for a minimal application +/// to run. Specifically, the `Promise`s `Cpath`, `Flock`, `Inet`, `Rpath`, `Stdio`, `Unveil`, and `Wpath` +/// are passed. +#[cfg(all(feature = "priv_sep", target_os = "openbsd"))] +#[inline] +pub fn pledge_init() -> Result<Promises<7>, Error> { + let promises = Promises::new([ + Promise::Cpath, + Promise::Flock, + Promise::Inet, + Promise::Rpath, + Promise::Stdio, + Promise::Unveil, + Promise::Wpath, + ]); + match promises.pledge() { + Ok(()) => Ok(promises), + Err(e) => Err(e), + } +} +/// No-op that always returns `Ok`. +#[allow(clippy::unnecessary_wraps)] +#[cfg(not(all(feature = "priv_sep", target_os = "openbsd")))] +#[inline] +pub const fn pledge_init() -> Result<Zst, Infallible> { + Ok(Zst) +} +/// Removes `Promise::Unveil`. +#[cfg(all(feature = "priv_sep", target_os = "openbsd"))] +#[inline] +pub fn pledge_away_unveil(promises: &mut Promises<7>) -> Result<(), Error> { + promises.remove(Promise::Unveil); + promises.pledge() +} +/// No-op that always returns `Ok`. +#[allow(clippy::unnecessary_wraps)] +#[cfg(not(all(feature = "priv_sep", target_os = "openbsd")))] +#[inline] +pub fn pledge_away_unveil(_: &mut Zst) -> Result<(), Infallible> { + Ok(()) +} +/// Calls `unveil` on `path` with `Permissions::READ`.
+#[cfg(all(feature = "priv_sep", target_os = "openbsd"))] +#[inline] +pub fn unveil_read<P: AsRef<Path>>(path: P) -> Result<(), UnveilErr> { + Permissions::READ.unveil(path) +} +/// No-op that always returns `Ok`. +#[cfg(not(all(feature = "priv_sep", target_os = "openbsd")))] +#[allow(clippy::unnecessary_wraps)] +#[inline] +pub fn unveil_read<P: AsRef<Path>>(_: P) -> Result<(), Infallible> { + Ok(()) +} +/// Calls `unveil` on `path` with create, read, and write `Permissions`. +#[cfg(all(feature = "priv_sep", target_os = "openbsd"))] +#[inline] +pub fn unveil_create_read_write<P: AsRef<Path>>(path: P) -> Result<(), UnveilErr> { + let mut perms = Permissions::ALL; + perms.execute = false; + perms.unveil(path) +} +/// No-op that always returns `Ok`. +#[cfg(not(all(feature = "priv_sep", target_os = "openbsd")))] +#[allow(clippy::unnecessary_wraps)] +#[inline] +pub fn unveil_create_read_write<P: AsRef<Path>>(_: P) -> Result<(), Infallible> { + Ok(()) +} diff --git a/src/ratelimit.rs b/src/ratelimit.rs @@ -1,38 +1,31 @@ -use once_cell::sync::Lazy; -use std::{net::IpAddr, num::NonZeroU32, time::Duration}; - -use governor::{clock::DefaultClock, state::keyed::DashMapStateStore, Quota, RateLimiter}; - use crate::{Error, CONFIG}; - +use governor::{clock::DefaultClock, state::keyed::DashMapStateStore, Quota, RateLimiter}; +use std::{net::IpAddr, num::NonZeroU32, sync::OnceLock, time::Duration}; type Limiter<T = IpAddr> = RateLimiter<T, DashMapStateStore<T>, DefaultClock>; - -static LIMITER_LOGIN: Lazy<Limiter> = Lazy::new(|| { - let seconds = Duration::from_secs(CONFIG.login_ratelimit_seconds()); - let burst = NonZeroU32::new(CONFIG.login_ratelimit_max_burst()).expect("Non-zero login ratelimit burst"); - RateLimiter::keyed(Quota::with_period(seconds).expect("Non-zero login ratelimit seconds").allow_burst(burst)) -}); - -static LIMITER_ADMIN: Lazy<Limiter> = Lazy::new(|| { - let seconds = Duration::from_secs(CONFIG.admin_ratelimit_seconds()); - let burst = NonZeroU32::new(CONFIG.admin_ratelimit_max_burst()).expect("Non-zero admin ratelimit burst"); - RateLimiter::keyed(Quota::with_period(seconds).expect("Non-zero admin ratelimit seconds").allow_burst(burst)) -}); - +static LIMITER_LOGIN: OnceLock<Limiter> = OnceLock::new(); +pub fn init_limiter() { + LIMITER_LOGIN + .set({ + let seconds = Duration::from_secs(CONFIG.login_ratelimit_seconds()); + let burst = NonZeroU32::new(CONFIG.login_ratelimit_max_burst()) + .expect("Non-zero login ratelimit burst"); + RateLimiter::keyed( + Quota::with_period(seconds) + .expect("Non-zero login ratelimit seconds") + .allow_burst(burst), + ) + }) + .expect("") +} pub fn check_limit_login(ip: &IpAddr) -> Result<(), Error> { - match LIMITER_LOGIN.check_key(ip) { + match LIMITER_LOGIN + .get() + .expect("LIMITER_LOGIN should be initialized in main") + .check_key(ip) + { Ok(_) => Ok(()), Err(_e) => { err_code!("Too many login requests", 429); } } } - -pub fn check_limit_admin(ip: &IpAddr) -> Result<(), Error> { - match LIMITER_ADMIN.check_key(ip) { - Ok(_) => Ok(()), - Err(_e) => { - err_code!("Too many admin requests", 429); - } - } -} diff --git a/src/static/scripts/admin_diagnostics.js b/src/static/scripts/admin_diagnostics.js @@ -238,4 +238,4 @@ document.addEventListener("DOMContentLoaded", (event) => { if (btnCopySupport) { btnCopySupport.addEventListener("click", copyToClipboard); } -}); -\ No newline at end of file +}); diff --git a/src/util.rs b/src/util.rs @@ -1,17 +1,14 @@ // // Web Headers and caching // -use std::{ - io::{Cursor, ErrorKind}, - ops::Deref, -}; 
+use std::{io::Cursor, ops::Deref}; use rocket::{ fairing::{Fairing, Info, Kind}, http::{ContentType, Header, HeaderMap, Method, Status}, request::FromParam, response::{self, Responder}, - Data, Orbit, Request, Response, Rocket, + Request, Response, }; use tokio::{ @@ -38,10 +35,16 @@ impl Fairing for AppHeaders { // Check if this connection is an Upgrade/WebSocket connection and return early // We do not want to add any extra headers, this could cause issues with reverse proxies or CloudFlare - if req_uri_path.ends_with("notifications/hub") || req_uri_path.ends_with("notifications/anonymous-hub") { - match (req_headers.get_one("connection"), req_headers.get_one("upgrade")) { + if req_uri_path.ends_with("notifications/hub") + || req_uri_path.ends_with("notifications/anonymous-hub") + { + match ( + req_headers.get_one("connection"), + req_headers.get_one("upgrade"), + ) { (Some(c), Some(u)) - if c.to_lowercase().contains("upgrade") && u.to_lowercase().contains("websocket") => + if c.to_lowercase().contains("upgrade") + && u.to_lowercase().contains("websocket") => { // Remove headers which could cause websocket connection issues res.remove_header("X-Frame-Options"); @@ -59,7 +62,7 @@ impl Fairing for AppHeaders { res.set_raw_header("X-XSS-Protection", "0"); // Do not send the Content-Security-Policy (CSP) Header and X-Frame-Options for the *-connector.html files. - // This can cause issues when some MFA requests needs to open a popup or page within the clients like WebAuthn, or Duo. + // This can cause issues when some MFA requests need to open a popup or page within the clients, like WebAuthn. // This is the same behavior as upstream Bitwarden. if !req_uri_path.ends_with("connector.html") { // # Frame Ancestors: @@ -161,8 +164,14 @@ impl Fairing for Cors { let req_allow_headers = Cors::get_header(req_headers, "Access-Control-Request-Headers"); let req_allow_method = Cors::get_header(req_headers, "Access-Control-Request-Method"); - response.set_header(Header::new("Access-Control-Allow-Methods", req_allow_method)); - response.set_header(Header::new("Access-Control-Allow-Headers", req_allow_headers)); + response.set_header(Header::new( + "Access-Control-Allow-Methods", + req_allow_method, + )); + response.set_header(Header::new( + "Access-Control-Allow-Headers", + req_allow_headers, + )); response.set_header(Header::new("Access-Control-Allow-Credentials", "true")); response.set_status(Status::Ok); response.set_header(ContentType::Plain); @@ -202,7 +211,6 @@ impl<R> Cached<R> { } } } - impl<'r, R: 'r + Responder<'r, 'static> + Send> Responder<'r, 'static> for Cached<R> { fn respond_to(self, request: &'r Request<'_>) -> response::Result<'static> { let mut res = self.response.respond_to(request)?; @@ -220,7 +228,6 @@ impl<'r, R: 'r + Responder<'r, 'static> + Send> Responder<'r, 'static> for Cache Ok(res) } } - pub struct SafeString(String); impl std::fmt::Display for SafeString { @@ -249,88 +256,16 @@ impl<'r> FromParam<'r> for SafeString { #[inline(always)] fn from_param(param: &'r str) -> Result<Self, Self::Error> { - if param.chars().all(|c| matches!(c, 'a'..='z' | 'A'..='Z' |'0'..='9' | '-')) { + if param + .chars() + .all(|c| matches!(c, 'a'..='z' | 'A'..='Z' |'0'..='9' | '-')) + { Ok(SafeString(param.to_string())) } else { Err(()) } } } - -// Log all the routes from the main paths list, and the attachments endpoint -// Effectively ignores, any static file route, and the alive endpoint -const LOGGED_ROUTES: [&str; 7] = ["/api", "/admin", "/identity", "/icons", "/attachments", "/events",
"/notifications"]; - -// Boolean is extra debug, when true, we ignore the whitelist above and also print the mounts -pub struct BetterLogging(pub bool); -#[rocket::async_trait] -impl Fairing for BetterLogging { - fn info(&self) -> Info { - Info { - name: "Better Logging", - kind: Kind::Liftoff | Kind::Request | Kind::Response, - } - } - - async fn on_liftoff(&self, rocket: &Rocket<Orbit>) { - if self.0 { - info!(target: "routes", "Routes loaded:"); - let mut routes: Vec<_> = rocket.routes().collect(); - routes.sort_by_key(|r| r.uri.path()); - for route in routes { - if route.rank < 0 { - info!(target: "routes", "{:<6} {}", route.method, route.uri); - } else { - info!(target: "routes", "{:<6} {} [{}]", route.method, route.uri, route.rank); - } - } - } - - let config = rocket.config(); - let scheme = if config.tls_enabled() { - "https" - } else { - "http" - }; - let addr = format!("{}://{}:{}", &scheme, &config.address, &config.port); - info!(target: "start", "Rocket has launched from {}", addr); - } - - async fn on_request(&self, request: &mut Request<'_>, _data: &mut Data<'_>) { - let method = request.method(); - if !self.0 && method == Method::Options { - return; - } - let uri = request.uri(); - let uri_path = uri.path(); - let uri_path_str = uri_path.url_decode_lossy(); - let uri_subpath = uri_path_str.strip_prefix(&CONFIG.domain_path()).unwrap_or(&uri_path_str); - if self.0 || LOGGED_ROUTES.iter().any(|r| uri_subpath.starts_with(r)) { - match uri.query() { - Some(q) => info!(target: "request", "{} {}?{}", method, uri_path_str, &q[..q.len().min(30)]), - None => info!(target: "request", "{} {}", method, uri_path_str), - }; - } - } - - async fn on_response<'r>(&self, request: &'r Request<'_>, response: &mut Response<'r>) { - if !self.0 && request.method() == Method::Options { - return; - } - let uri_path = request.uri().path(); - let uri_path_str = uri_path.url_decode_lossy(); - let uri_subpath = uri_path_str.strip_prefix(&CONFIG.domain_path()).unwrap_or(&uri_path_str); - if self.0 || LOGGED_ROUTES.iter().any(|r| uri_subpath.starts_with(r)) { - let status = response.status(); - if let Some(ref route) = request.route() { - info!(target: "response", "{} => {}", route, status) - } else { - info!(target: "response", "{}", status) - } - } - } -} - // // File handling // @@ -349,9 +284,6 @@ pub fn write_file(path: &str, content: &[u8]) -> Result<(), crate::error::Error> let mut f = match File::create(path) { Ok(file) => file, Err(e) => { - if e.kind() == ErrorKind::PermissionDenied { - error!("Can't create '{}': Permission denied", path); - } return Err(From::from(e)); } }; @@ -517,38 +449,13 @@ pub fn format_naive_datetime_local(dt: &NaiveDateTime, fmt: &str) -> String { /// /// https://httpwg.org/specs/rfc7231.html#http.date pub fn format_datetime_http(dt: &DateTime<Local>) -> String { - let expiry_time = DateTime::<chrono::Utc>::from_naive_utc_and_offset(dt.naive_utc(), chrono::Utc); + let expiry_time = + DateTime::<chrono::Utc>::from_naive_utc_and_offset(dt.naive_utc(), chrono::Utc); // HACK: HTTP expects the date to always be GMT (UTC) rather than giving an // offset (which would always be 0 in UTC anyway) expiry_time.to_rfc2822().replace("+0000", "GMT") } - -pub fn parse_date(date: &str) -> NaiveDateTime { - NaiveDateTime::parse_from_str(date, DATETIME_FORMAT).unwrap() -} - -// -// Deployment environment methods -// - -/// Returns true if the program is running in Docker or Podman. 
-pub fn is_running_in_docker() -> bool { - Path::new("/.dockerenv").exists() || Path::new("/run/.containerenv").exists() -} - -/// Simple check to determine on which docker base image vaultwarden is running. -/// We build images based upon Debian or Alpine, so these we check here. -pub fn docker_base_image() -> &'static str { - if Path::new("/etc/debian_version").exists() { - "Debian" - } else if Path::new("/etc/alpine-release").exists() { - "Alpine" - } else { - "Unknown" - } -} - // // Deserialization methods // @@ -693,25 +600,6 @@ where } } } - -use reqwest::{header, Client, ClientBuilder}; - -pub fn get_reqwest_client() -> Client { - match get_reqwest_client_builder().build() { - Ok(client) => client, - Err(e) => { - error!("Possible trust-dns error, trying with trust-dns disabled: '{e}'"); - get_reqwest_client_builder().trust_dns(false).build().expect("Failed to build client") - } - } -} - -pub fn get_reqwest_client_builder() -> ClientBuilder { - let mut headers = header::HeaderMap::new(); - headers.insert(header::USER_AGENT, header::HeaderValue::from_static("Vaultwarden")); - Client::builder().default_headers(headers).timeout(Duration::from_secs(10)) -} - pub fn convert_json_key_lcase_first(src_json: Value) -> Value { match src_json { Value::Array(elm) => { diff --git a/tools/global_domains.py b/tools/global_domains.py @@ -1,81 +0,0 @@ -#!/usr/bin/env python3 -# -# This script generates a global equivalent domains JSON file from -# the upstream Bitwarden source repo. -# -import json -import re -import sys -import urllib.request - -from collections import OrderedDict - -if not (2 <= len(sys.argv) <= 3): - print("usage: %s <OUTPUT-FILE> [GIT-REF]" % sys.argv[0]) - print() - print("This script generates a global equivalent domains JSON file from") - print("the upstream Bitwarden source repo.") - sys.exit(1) - -OUTPUT_FILE = sys.argv[1] -GIT_REF = 'master' if len(sys.argv) == 2 else sys.argv[2] - -BASE_URL = 'https://github.com/bitwarden/server/raw/%s' % GIT_REF -ENUMS_URL = '%s/src/Core/Enums/GlobalEquivalentDomainsType.cs' % BASE_URL -DOMAIN_LISTS_URL = '%s/src/Core/Utilities/StaticStore.cs' % BASE_URL - -# Enum lines look like: -# -# EnumName0 = 0, -# EnumName1 = 1, -# -ENUM_RE = re.compile( - r'\s*' # Leading whitespace (optional). - r'([_0-9a-zA-Z]+)' # Enum name (capture group 1). - r'\s*=\s*' # '=' with optional surrounding whitespace. - r'([0-9]+)' # Enum value (capture group 2). -) - -# Global domains lines look like: -# -# GlobalDomains.Add(GlobalEquivalentDomainsType.EnumName, new List<string> { "x.com", "y.com" }); -# -DOMAIN_LIST_RE = re.compile( - r'\s*' # Leading whitespace (optional). - r'GlobalDomains\.Add\(GlobalEquivalentDomainsType\.' - r'([_0-9a-zA-Z]+)' # Enum name (capture group 1). - r'\s*,\s*new List<string>\s*{' - r'([^}]+)' # Domain list (capture group 2). - r'}\);' -) - -enums = dict() -domain_lists = OrderedDict() - -# Read in the enum names and values. -with urllib.request.urlopen(ENUMS_URL) as response: - for ln in response.read().decode('utf-8').split('\n'): - m = ENUM_RE.match(ln) - if m: - enums[m.group(1)] = int(m.group(2)) - -# Read in the domain lists. -with urllib.request.urlopen(DOMAIN_LISTS_URL) as response: - for ln in response.read().decode('utf-8').split('\n'): - m = DOMAIN_LIST_RE.match(ln) - if m: - # Strip double quotes and extraneous spaces in each domain. - domain_lists[m.group(1)] = [d.strip(' "') for d in m.group(2).split(",")] - -# Build the global domains data structure. 
-global_domains = [] -for name, domain_list in domain_lists.items(): - entry = OrderedDict() - entry["Type"] = enums[name] - entry["Domains"] = domain_list - entry["Excluded"] = False - global_domains.append(entry) - -# Write out the global domains JSON file. -with open(OUTPUT_FILE, 'w') as f: - json.dump(global_domains, f, indent=2)
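Notes on the pledge(2)/unveil(2) flow introduced above: taken together, src/main.rs and src/priv_sep.rs reduce to the lifecycle sketched below. This is a minimal sketch, not part of the commit; it assumes OpenBSD and the priv_sep crate API exactly as used in src/priv_sep.rs, and the two paths are hypothetical stand-ins for the TLS material and DATA_FOLDER.

use priv_sep::{Permissions, Promise, Promises};
use std::io;

fn main() -> io::Result<()> {
    // 1. Pledge the broad startup set, keeping `Unveil` available for now.
    let mut promises = Promises::new([
        Promise::Cpath,
        Promise::Flock,
        Promise::Inet,
        Promise::Rpath,
        Promise::Stdio,
        Promise::Unveil,
        Promise::Wpath,
    ]);
    promises.pledge()?;
    // 2. Expose only the paths the server needs, with minimal permissions.
    //    (Hypothetical paths; the real code unveils the current directory,
    //    the TLS cert/key from the Rocket config, and DATA_FOLDER.)
    Permissions::READ
        .unveil("/etc/vw_small/tls_key.pem")
        .expect("unable to unveil TLS key");
    let mut perms = Permissions::ALL;
    perms.execute = false; // create/read/write, never execute
    perms
        .unveil("/var/vw_small/data")
        .expect("unable to unveil data folder");
    // 3. Freeze the filesystem view: drop `Unveil` and re-pledge. Any later
    //    unveil(2), and any system call outside the pledged set, is fatal.
    promises.remove(Promise::Unveil);
    promises.pledge()?;
    Ok(())
}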
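The reworked main() is also no longer an async fn under #[rocket::main]: pledge_init() and the first unveil calls run on the plain main thread, the Tokio runtime is then built by hand, and the TLS paths are unveiled and `Unveil` pledged away inside block_on once the Rocket config has been read. A minimal sketch of that ordering, assuming tokio with the rt-multi-thread feature; the async body stands in for igniting and launching Rocket.

use tokio::runtime::Builder;

fn main() -> std::io::Result<()> {
    // pledge_init() and the data-folder unveils would run here, before any
    // runtime worker threads exist.
    let runtime = Builder::new_multi_thread().enable_all().build()?;
    runtime.block_on(async {
        // In the real code: read the Rocket config, unveil the TLS cert/key,
        // pledge away `Unveil`, then ignite and launch Rocket.
    });
    Ok(())
}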
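Likewise, the ratelimit rework trades once_cell::sync::Lazy for std's OnceLock: initialization is no longer lazy on first use but happens exactly once, explicitly, via init_limiter() from static_init() in main(). A sketch of the shape of that pattern, with u32 standing in for the governor Limiter type; names mirror src/ratelimit.rs but the values are illustrative.

use std::sync::OnceLock;

// Stand-in for `static LIMITER_LOGIN: OnceLock<Limiter>` in src/ratelimit.rs.
static LIMITER_LOGIN: OnceLock<u32> = OnceLock::new();

fn init_limiter() {
    // `set` fails only if the value was already initialized.
    LIMITER_LOGIN
        .set(10) // built from CONFIG values in the real code
        .expect("LIMITER_LOGIN already initialized");
}

fn check_limit_login(requests: u32) -> bool {
    // `get` is infallible once init_limiter() has run in main().
    requests <= *LIMITER_LOGIN.get().expect("initialized in main")
}

fn main() {
    init_limiter();
    assert!(check_limit_login(3));
}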