diff --git a/Dockerfile b/Dockerfile index 82f54ee..ad74d44 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,9 +13,19 @@ COPY frontend/ ./ RUN npm run generate +FROM alpine:3 AS sqlite-extension-compiler +WORKDIR /var/lib/warren + +RUN apk add sqlite-libs sqlite-dev build-base +COPY backend/sqlite_extensions sqlite_extensions +RUN gcc -g -fPIC -shared sqlite_extensions/uuid.c -o sqlite_extensions/uuid + + FROM rust:alpine AS backend-builder WORKDIR /usr/src/warren +RUN apk add sqlite sqlite-dev build-base + COPY backend/Cargo.toml backend/Cargo.lock ./ RUN mkdir -p src/bin/backend && mkdir src/lib && echo "fn main() {}" > src/bin/backend/main.rs && echo "" > src/lib/lib.rs RUN apk add --no-cache pkgconfig openssl openssl-dev libc-dev openssl-libs-static @@ -28,6 +38,8 @@ RUN cargo build --release FROM alpine:3 WORKDIR /var/lib/warren +COPY --from=sqlite-extension-compiler /var/lib/warren/sqlite_extensions/uuid /var/lib/warren/sqlite_extensions/uuid + COPY --from=backend-builder /usr/src/warren/target/release/warren_backend /usr/bin/warren COPY --from=frontend-builder /usr/src/warren/dist ./frontend diff --git a/backend/Cargo.lock b/backend/Cargo.lock index b68560b..6471dd9 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -1235,6 +1235,7 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" dependencies = [ + "cc", "pkg-config", "vcpkg", ] diff --git a/backend/Cargo.toml b/backend/Cargo.toml index d879ade..d1c20af 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -13,7 +13,7 @@ path = "src/bin/backend/main.rs" [dependencies] anyhow = "1.0.98" -argon2 = "0.5.3" +argon2 = { version = "0.5.3", features = ["std"] } axum = { version = "0.8.4", features = ["multipart", "query"] } axum-extra = { version = "0.10.1", features = ["cookie", "multipart"] } base64 = "0.22.1" @@ -29,13 +29,7 @@ regex = "1.11.1" rustix = { version = "1.0.8", features = ["fs"] } serde = { version = "1.0.219", features = ["derive"] } serde_json = "1.0.140" -sqlx = { version = "0.8.6", features = [ - "chrono", - "postgres", - "runtime-tokio", - "time", - "uuid", -] } +sqlx = { version = "0.8.6", features = ["chrono", "runtime-tokio", "sqlite", "time", "uuid"] } thiserror = "2.0.12" tokio = { version = "1.46.1", features = ["full"] } tokio-stream = "0.1.17" diff --git a/backend/migrations/20250712021357_create_warren_table.sql b/backend/migrations/20250712021357_create_warren_table.sql deleted file mode 100644 index 0482360..0000000 --- a/backend/migrations/20250712021357_create_warren_table.sql +++ /dev/null @@ -1,7 +0,0 @@ -CREATE TABLE warrens ( - id UUID PRIMARY KEY DEFAULT GEN_RANDOM_UUID(), - path VARCHAR NOT NULL, - created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP -); - -CREATE INDEX idx_warrens_path ON warrens(path); diff --git a/backend/migrations/20250712024344_add_warren_name.sql b/backend/migrations/20250712024344_add_warren_name.sql deleted file mode 100644 index d0d16cb..0000000 --- a/backend/migrations/20250712024344_add_warren_name.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE warrens ADD COLUMN name VARCHAR NOT NULL; diff --git a/backend/migrations/20250716125144_create_users_table.sql b/backend/migrations/20250716125144_create_users_table.sql deleted file mode 100644 index 05badb1..0000000 --- a/backend/migrations/20250716125144_create_users_table.sql +++ /dev/null @@ -1,9 +0,0 @@ -CREATE TABLE users ( - id UUID PRIMARY KEY DEFAULT GEN_RANDOM_UUID(), - name VARCHAR NOT 
NULL, - email VARCHAR NOT NULL, - hash VARCHAR NOT NULL, - admin BOOLEAN NOT NULL DEFAULT FALSE, - updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP -); diff --git a/backend/migrations/20250716135209_user_email_unique.sql b/backend/migrations/20250716135209_user_email_unique.sql deleted file mode 100644 index 6ab1046..0000000 --- a/backend/migrations/20250716135209_user_email_unique.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE users ADD CONSTRAINT users_email_key UNIQUE (email); diff --git a/backend/migrations/20250717123142_create_auth_sessions.sql b/backend/migrations/20250717123142_create_auth_sessions.sql deleted file mode 100644 index 7210de5..0000000 --- a/backend/migrations/20250717123142_create_auth_sessions.sql +++ /dev/null @@ -1,6 +0,0 @@ -CREATE TABLE auth_sessions ( - session_id VARCHAR NOT NULL PRIMARY KEY, - user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, - expires_at TIMESTAMP NOT NULL, - created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP -); diff --git a/backend/migrations/20250718180903_create_user_warren_table.sql b/backend/migrations/20250718180903_create_user_warren_table.sql deleted file mode 100644 index e56f0f8..0000000 --- a/backend/migrations/20250718180903_create_user_warren_table.sql +++ /dev/null @@ -1,11 +0,0 @@ -CREATE TABLE user_warrens ( - user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, - warren_id UUID NOT NULL REFERENCES warrens(id) ON DELETE CASCADE, - can_create_children BOOLEAN NOT NULL, - can_list_files BOOLEAN NOT NULL, - can_read_files BOOLEAN NOT NULL, - can_modify_files BOOLEAN NOT NULL, - can_delete_files BOOLEAN NOT NULL, - can_delete_warren BOOLEAN NOT NULL, - PRIMARY KEY(user_id, warren_id) -); diff --git a/backend/migrations/20250721171502_user_warrens_drop_some_permissions.sql b/backend/migrations/20250721171502_user_warrens_drop_some_permissions.sql deleted file mode 100644 index 5299300..0000000 --- a/backend/migrations/20250721171502_user_warrens_drop_some_permissions.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE user_warrens DROP COLUMN can_create_children, DROP COLUMN can_delete_warren; diff --git a/backend/migrations/20250808160437_users_oidc.sql b/backend/migrations/20250808160437_users_oidc.sql deleted file mode 100644 index 86248d6..0000000 --- a/backend/migrations/20250808160437_users_oidc.sql +++ /dev/null @@ -1,2 +0,0 @@ -ALTER TABLE users ALTER COLUMN hash DROP NOT NULL; -ALTER TABLE users ADD COLUMN oidc_sub VARCHAR UNIQUE; diff --git a/backend/migrations/20250810215614_create_shares_table.sql b/backend/migrations/20250810215614_create_shares_table.sql deleted file mode 100644 index c6f3c15..0000000 --- a/backend/migrations/20250810215614_create_shares_table.sql +++ /dev/null @@ -1,9 +0,0 @@ -CREATE TABLE shares ( - id UUID PRIMARY KEY DEFAULT GEN_RANDOM_UUID(), - creator_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, - warren_id UUID NOT NULL REFERENCES warrens(id) ON DELETE CASCADE, - path VARCHAR NOT NULL, - password_hash VARCHAR NOT NULL, - expires_at TIMESTAMP, - created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP -); diff --git a/backend/migrations/20250825115342_share_permissions.sql b/backend/migrations/20250825115342_share_permissions.sql deleted file mode 100644 index 985d10a..0000000 --- a/backend/migrations/20250825115342_share_permissions.sql +++ /dev/null @@ -1,21 +0,0 @@ -ALTER TABLE - user_warrens -ADD COLUMN - can_list_shares BOOLEAN NOT NULL DEFAULT false, -ADD COLUMN - can_create_shares BOOLEAN NOT 
NULL DEFAULT false, -ADD COLUMN - can_modify_shares BOOLEAN NOT NULL DEFAULT false, -ADD COLUMN - can_delete_shares BOOLEAN NOT NULL DEFAULT false; - -ALTER TABLE - user_warrens -ALTER COLUMN - can_list_shares DROP DEFAULT, -ALTER COLUMN - can_create_shares DROP DEFAULT, -ALTER COLUMN - can_modify_shares DROP DEFAULT, -ALTER COLUMN - can_delete_shares DROP DEFAULT; diff --git a/backend/migrations/20250825143434_shares_password_nullable.sql b/backend/migrations/20250825143434_shares_password_nullable.sql deleted file mode 100644 index 6b9e082..0000000 --- a/backend/migrations/20250825143434_shares_password_nullable.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE shares ALTER COLUMN password_hash DROP NOT NULL; diff --git a/backend/migrations/20250825150026_shares_path_index.sql b/backend/migrations/20250825150026_shares_path_index.sql deleted file mode 100644 index 58e1ed8..0000000 --- a/backend/migrations/20250825150026_shares_path_index.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE INDEX idx_shares_path ON shares(path); diff --git a/backend/migrations/20250906174941_init.sql b/backend/migrations/20250906174941_init.sql new file mode 100644 index 0000000..1a8d604 --- /dev/null +++ b/backend/migrations/20250906174941_init.sql @@ -0,0 +1,50 @@ +CREATE TABLE users ( + id BLOB NOT NULL PRIMARY KEY DEFAULT (uuid_blob(uuid())), + oidc_sub TEXT UNIQUE, + name TEXT NOT NULL, + email TEXT NOT NULL UNIQUE, + admin BOOLEAN NOT NULL DEFAULT FALSE, + hash TEXT, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE warrens ( + id BLOB NOT NULL PRIMARY KEY DEFAULT (uuid_blob(uuid())), + name TEXT NOT NULL, + path TEXT NOT NULL UNIQUE, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE user_warrens ( + user_id BLOB NOT NULL REFERENCES users(id) ON DELETE CASCADE, + warren_id BLOB NOT NULL REFERENCES warrens(id) ON DELETE CASCADE, + can_list_files BOOLEAN NOT NULL, + can_read_files BOOLEAN NOT NULL, + can_modify_files BOOLEAN NOT NULL, + can_delete_files BOOLEAN NOT NULL, + can_list_shares BOOLEAN NOT NULL, + can_create_shares BOOLEAN NOT NULL, + can_modify_shares BOOLEAN NOT NULL, + can_delete_shares BOOLEAN NOT NULL, + PRIMARY KEY(user_id, warren_id) +); + +CREATE TABLE shares ( + id BLOB NOT NULL PRIMARY KEY DEFAULT (uuid_blob(uuid())), + creator_id BLOB NOT NULL REFERENCES users(id) ON DELETE CASCADE, + warren_id BLOB NOT NULL REFERENCES warrens(id) ON DELETE CASCADE, + path TEXT NOT NULL, + password_hash TEXT, + expires_at DATETIME, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX idx_shares_path ON shares(path); + +CREATE TABLE auth_sessions ( + session_id TEXT NOT NULL PRIMARY KEY, + user_id BLOB NOT NULL REFERENCES users(id) ON DELETE CASCADE, + expires_at DATETIME NOT NULL, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP +); diff --git a/backend/migrations/20250829140914_create_admin_user.sql b/backend/migrations/20250906184417_create_admin_user.sql similarity index 100% rename from backend/migrations/20250829140914_create_admin_user.sql rename to backend/migrations/20250906184417_create_admin_user.sql diff --git a/backend/sqlite_extensions/uuid b/backend/sqlite_extensions/uuid new file mode 100755 index 0000000..f368b5f Binary files /dev/null and b/backend/sqlite_extensions/uuid differ diff --git a/backend/sqlite_extensions/uuid.c b/backend/sqlite_extensions/uuid.c new file mode 100644 index 0000000..9732e3c --- /dev/null +++ b/backend/sqlite_extensions/uuid.c @@ 
-0,0 +1,231 @@
+/*
+** 2019-10-23
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This SQLite extension implements functions that handling RFC-4122 UUIDs
+** Three SQL functions are implemented:
+**
+**      uuid()       - generate a version 4 UUID as a string
+**      uuid_str(X)  - convert a UUID X into a well-formed UUID string
+**      uuid_blob(X) - convert a UUID X into a 16-byte blob
+**
+** The output from uuid() and uuid_str(X) are always well-formed RFC-4122
+** UUID strings in this format:
+**
+**        xxxxxxxx-xxxx-Mxxx-Nxxx-xxxxxxxxxxxx
+**
+** All of the 'x', 'M', and 'N' values are lower-case hexadecimal digits.
+** The M digit indicates the "version". For uuid()-generated UUIDs, the
+** version is always "4" (a random UUID). The upper three bits of N digit
+** are the "variant". This library only supports variant 1 (indicated
+** by values of N between '8' and 'b') as those are overwhelming the most
+** common. Other variants are for legacy compatibility only.
+**
+** The output of uuid_blob(X) is always a 16-byte blob. The UUID input
+** string is converted in network byte order (big-endian) in accordance
+** with RFC-4122 specifications for variant-1 UUIDs. Note that network
+** byte order is *always* used, even if the input self-identifies as a
+** variant-2 UUID.
+**
+** The input X to the uuid_str() and uuid_blob() functions can be either
+** a string or a BLOB. If it is a BLOB it must be exactly 16 bytes in
+** length or else a NULL is returned. If the input is a string it must
+** consist of 32 hexadecimal digits, upper or lower case, optionally
+** surrounded by {...} and with optional "-" characters interposed in the
+** middle. The flexibility of input is inspired by the PostgreSQL
+** implementation of UUID functions that accept in all of the following
+** formats:
+**
+**    A0EEBC99-9C0B-4EF8-BB6D-6BB9BD380A11
+**    {a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11}
+**    a0eebc999c0b4ef8bb6d6bb9bd380a11
+**    a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11
+**    {a0eebc99-9c0b4ef8-bb6d6bb9-bd380a11}
+**
+** If any of the above inputs are passed into uuid_str(), the output will
+** always be in the canonical RFC-4122 format:
+**
+**    a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11
+**
+** If the X input string has too few or too many digits or contains
+** stray characters other than {, }, or -, then NULL is returned.
+*/
+#include "sqlite3ext.h"
+SQLITE_EXTENSION_INIT1
+#include <assert.h>
+#include <string.h>
+#include <ctype.h>
+
+#if !defined(SQLITE_ASCII) && !defined(SQLITE_EBCDIC)
+#define SQLITE_ASCII 1
+#endif
+
+/*
+** Translate a single byte of Hex into an integer.
+** This routine only works if h really is a valid hexadecimal
+** character: 0..9a..fA..F
+*/
+static unsigned char sqlite3UuidHexToInt(int h) {
+  assert((h >= '0' && h <= '9') || (h >= 'a' && h <= 'f') ||
+         (h >= 'A' && h <= 'F'));
+#ifdef SQLITE_ASCII
+  h += 9 * (1 & (h >> 6));
+#endif
+#ifdef SQLITE_EBCDIC
+  h += 9 * (1 & ~(h >> 4));
+#endif
+  return (unsigned char)(h & 0xf);
+}
+
+/*
+** Convert a 16-byte BLOB into a well-formed RFC-4122 UUID. The output
+** buffer zStr should be at least 37 bytes in length. The output will
+** be zero-terminated.
+*/ +static void sqlite3UuidBlobToStr(const unsigned char *aBlob, /* Input blob */ + unsigned char *zStr /* Write the answer here */ +) { + static const char zDigits[] = "0123456789abcdef"; + int i, k; + unsigned char x; + k = 0; + for (i = 0, k = 0x550; i < 16; i++, k = k >> 1) { + if (k & 1) { + zStr[0] = '-'; + zStr++; + } + x = aBlob[i]; + zStr[0] = zDigits[x >> 4]; + zStr[1] = zDigits[x & 0xf]; + zStr += 2; + } + *zStr = 0; +} + +/* +** Attempt to parse a zero-terminated input string zStr into a binary +** UUID. Return 0 on success, or non-zero if the input string is not +** parsable. +*/ +static int sqlite3UuidStrToBlob(const unsigned char *zStr, /* Input string */ + unsigned char *aBlob /* Write results here */ +) { + int i; + if (zStr[0] == '{') + zStr++; + for (i = 0; i < 16; i++) { + if (zStr[0] == '-') + zStr++; + if (isxdigit(zStr[0]) && isxdigit(zStr[1])) { + aBlob[i] = (sqlite3UuidHexToInt(zStr[0]) << 4) + + sqlite3UuidHexToInt(zStr[1]); + zStr += 2; + } else { + return 1; + } + } + if (zStr[0] == '}') + zStr++; + return zStr[0] != 0; +} + +/* +** Render sqlite3_value pIn as a 16-byte UUID blob. Return a pointer +** to the blob, or NULL if the input is not well-formed. +*/ +static const unsigned char * +sqlite3UuidInputToBlob(sqlite3_value *pIn, /* Input text */ + unsigned char *pBuf /* output buffer */ +) { + switch (sqlite3_value_type(pIn)) { + case SQLITE_TEXT: { + const unsigned char *z = sqlite3_value_text(pIn); + if (sqlite3UuidStrToBlob(z, pBuf)) + return 0; + return pBuf; + } + case SQLITE_BLOB: { + int n = sqlite3_value_bytes(pIn); + return n == 16 ? sqlite3_value_blob(pIn) : 0; + } + default: { + return 0; + } + } +} + +/* Implementation of uuid() */ +static void sqlite3UuidFunc(sqlite3_context *context, int argc, + sqlite3_value **argv) { + unsigned char aBlob[16]; + unsigned char zStr[37]; + (void)argc; + (void)argv; + sqlite3_randomness(16, aBlob); + aBlob[6] = (aBlob[6] & 0x0f) + 0x40; + aBlob[8] = (aBlob[8] & 0x3f) + 0x80; + sqlite3UuidBlobToStr(aBlob, zStr); + sqlite3_result_text(context, (char *)zStr, 36, SQLITE_TRANSIENT); +} + +/* Implementation of uuid_str() */ +static void sqlite3UuidStrFunc(sqlite3_context *context, int argc, + sqlite3_value **argv) { + unsigned char aBlob[16]; + unsigned char zStr[37]; + const unsigned char *pBlob; + (void)argc; + pBlob = sqlite3UuidInputToBlob(argv[0], aBlob); + if (pBlob == 0) + return; + sqlite3UuidBlobToStr(pBlob, zStr); + sqlite3_result_text(context, (char *)zStr, 36, SQLITE_TRANSIENT); +} + +/* Implementation of uuid_blob() */ +static void sqlite3UuidBlobFunc(sqlite3_context *context, int argc, + sqlite3_value **argv) { + unsigned char aBlob[16]; + const unsigned char *pBlob; + (void)argc; + pBlob = sqlite3UuidInputToBlob(argv[0], aBlob); + if (pBlob == 0) + return; + sqlite3_result_blob(context, pBlob, 16, SQLITE_TRANSIENT); +} + +#ifdef _WIN32 +__declspec(dllexport) +#endif +int sqlite3_uuid_init( + sqlite3 *db, + char **pzErrMsg, + const sqlite3_api_routines *pApi +){ + int rc = SQLITE_OK; + SQLITE_EXTENSION_INIT2(pApi); + (void)pzErrMsg; /* Unused parameter */ + rc = sqlite3_create_function(db, "uuid", 0, SQLITE_UTF8 | SQLITE_INNOCUOUS, + 0, sqlite3UuidFunc, 0, 0); + if (rc == SQLITE_OK) { + rc = sqlite3_create_function(db, "uuid_str", 1, + SQLITE_UTF8 | SQLITE_INNOCUOUS | + SQLITE_DETERMINISTIC, + 0, sqlite3UuidStrFunc, 0, 0); + } + if (rc == SQLITE_OK) { + rc = sqlite3_create_function(db, "uuid_blob", 1, + SQLITE_UTF8 | SQLITE_INNOCUOUS | + SQLITE_DETERMINISTIC, + 0, sqlite3UuidBlobFunc, 0, 0); + } + return 
rc; +} diff --git a/backend/src/bin/backend/main.rs b/backend/src/bin/backend/main.rs index d61bb6c..ac0324e 100644 --- a/backend/src/bin/backend/main.rs +++ b/backend/src/bin/backend/main.rs @@ -7,7 +7,7 @@ use warren::{ metrics_debug_logger::MetricsDebugLogger, notifier_debug_logger::NotifierDebugLogger, oidc::{Oidc, OidcConfig}, - postgres::{Postgres, PostgresConfig}, + sqlite::{Sqlite, SqliteConfig}, }, }; @@ -25,16 +25,15 @@ async fn main() -> anyhow::Result<()> { let metrics = MetricsDebugLogger::new(); let notifier = NotifierDebugLogger::new(); - let postgres_config = - PostgresConfig::new(config.database_url.clone(), config.database_name.clone()); - let postgres = Postgres::new(postgres_config).await?; + let sqlite_config = SqliteConfig::new(config.database_url.clone()); + let sqlite = Sqlite::new(sqlite_config).await?; let fs_config = FileSystemConfig::from_env(config.serve_dir.clone())?; let fs = FileSystem::new(fs_config)?; let fs_service = domain::warren::service::file_system::Service::new(fs, metrics, notifier); let warren_service = domain::warren::service::warren::Service::new( - postgres.clone(), + sqlite.clone(), metrics, notifier, fs_service.clone(), @@ -48,7 +47,7 @@ async fn main() -> anyhow::Result<()> { }; let auth_service = domain::warren::service::auth::Service::new( - postgres, + sqlite, metrics, notifier, config.auth, diff --git a/backend/src/lib/config.rs b/backend/src/lib/config.rs index 8598470..52031b3 100644 --- a/backend/src/lib/config.rs +++ b/backend/src/lib/config.rs @@ -6,7 +6,6 @@ use tracing::level_filters::LevelFilter; use crate::domain::warren::service::auth::AuthConfig; const DATABASE_URL_KEY: &str = "DATABASE_URL"; -const DATABASE_NAME_KEY: &str = "DATABASE_NAME"; const SERVER_ADDRESS_KEY: &str = "SERVER_ADDRESS"; const SERVER_PORT_KEY: &str = "SERVER_PORT"; @@ -28,7 +27,6 @@ pub struct Config { pub static_frontend_dir: Option, pub database_url: String, - pub database_name: String, pub log_level: LevelFilter, @@ -45,7 +43,6 @@ impl Config { let static_frontend_dir = Self::load_env(STATIC_FRONTEND_DIRECTORY).ok(); let database_url = Self::load_env(DATABASE_URL_KEY)?; - let database_name = Self::load_env(DATABASE_NAME_KEY)?; let log_level = LevelFilter::from_str(&Self::load_env(LOG_LEVEL_KEY).unwrap_or("INFO".to_string())) @@ -62,7 +59,6 @@ impl Config { static_frontend_dir, database_url, - database_name, log_level, diff --git a/backend/src/lib/outbound/mod.rs b/backend/src/lib/outbound/mod.rs index ae72e56..0aac32f 100644 --- a/backend/src/lib/outbound/mod.rs +++ b/backend/src/lib/outbound/mod.rs @@ -2,4 +2,4 @@ pub mod file_system; pub mod metrics_debug_logger; pub mod notifier_debug_logger; pub mod oidc; -pub mod postgres; +pub mod sqlite; diff --git a/backend/src/lib/outbound/postgres/mod.rs b/backend/src/lib/outbound/postgres/mod.rs deleted file mode 100644 index 727b691..0000000 --- a/backend/src/lib/outbound/postgres/mod.rs +++ /dev/null @@ -1,95 +0,0 @@ -use std::{str::FromStr as _, time::Duration}; - -use anyhow::Context as _; -use sqlx::{ - ConnectOptions as _, Connection as _, PgConnection, PgPool, - postgres::{PgConnectOptions, PgPoolOptions}, -}; -use tokio::task::JoinHandle; -pub mod auth; -pub mod share; -pub mod warrens; - -#[derive(Debug, Clone)] -pub struct PostgresConfig { - database_url: String, - database_name: String, -} - -impl PostgresConfig { - pub fn new(database_url: String, database_name: String) -> Self { - Self { - database_url, - database_name, - } - } -} - -#[derive(Debug, Clone)] -pub struct Postgres { - pool: PgPool, 
-} - -impl Postgres { - pub async fn new(config: PostgresConfig) -> anyhow::Result { - let opts = PgConnectOptions::from_str(&config.database_url)?.disable_statement_logging(); - - let mut connection = PgConnection::connect_with(&opts) - .await - .context("Failed to connect to the PostgreSQL database")?; - - match sqlx::query("SELECT datname FROM pg_database WHERE datname = $1") - .bind(&config.database_name) - .fetch_one(&mut connection) - .await - { - Ok(_) => (), - Err(sqlx::Error::RowNotFound) => { - sqlx::query(&format!("CREATE DATABASE {}", config.database_name)) - .execute(&mut connection) - .await?; - } - Err(e) => return Err(e.into()), - }; - - connection.close().await?; - - let pool = PgPoolOptions::new() - .connect_with(opts.database(&config.database_name)) - .await?; - sqlx::migrate!("./migrations").run(&pool).await?; - - // 3600 seconds = 1 hour - Self::start_cleanup_tasks(pool.clone(), Duration::from_secs(3600)); - - Ok(Self { pool }) - } - - pub(super) fn start_cleanup_tasks(pool: PgPool, interval: Duration) -> JoinHandle<()> { - tokio::spawn(async move { - loop { - { - let Ok(mut connection) = pool.acquire().await else { - break; - }; - - if let Ok(count) = Self::delete_expired_auth_sessions(&mut connection).await { - tracing::debug!("Removed {count} expired auth session(s)"); - } - - if let Ok(count) = Self::delete_expired_shares(&mut connection).await { - tracing::debug!("Deleted {count} expired share(s)"); - } - } - - tokio::time::sleep(interval).await; - } - - tracing::debug!("Session cleanup task stopped"); - }) - } -} - -pub(super) fn is_not_found_error(err: &sqlx::Error) -> bool { - matches!(err, sqlx::Error::RowNotFound) -} diff --git a/backend/src/lib/outbound/postgres/auth.rs b/backend/src/lib/outbound/sqlite/auth.rs similarity index 94% rename from backend/src/lib/outbound/postgres/auth.rs rename to backend/src/lib/outbound/sqlite/auth.rs index 9be74d3..bae35cf 100644 --- a/backend/src/lib/outbound/postgres/auth.rs +++ b/backend/src/lib/outbound/sqlite/auth.rs @@ -7,7 +7,7 @@ use argon2::{ }, }; use chrono::Utc; -use sqlx::{Acquire as _, PgConnection}; +use sqlx::{Acquire as _, SqliteConnection}; use uuid::Uuid; use crate::domain::warren::{ @@ -40,9 +40,9 @@ use crate::domain::warren::{ ports::{AuthRepository, WarrenService}, }; -use super::{Postgres, is_not_found_error}; +use super::{Sqlite, is_not_found_error}; -impl AuthRepository for Postgres { +impl AuthRepository for Sqlite { async fn create_user(&self, request: CreateUserRequest) -> Result { let mut connection = self .pool @@ -368,9 +368,9 @@ impl AuthRepository for Postgres { } } -impl Postgres { +impl Sqlite { pub(super) async fn delete_expired_auth_sessions( - connection: &mut PgConnection, + connection: &mut SqliteConnection, ) -> Result { let delete_count = sqlx::query( " @@ -389,7 +389,7 @@ impl Postgres { async fn create_user( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, name: &UserName, email: &UserEmail, password: &UserPassword, @@ -431,7 +431,7 @@ impl Postgres { async fn create_or_update_user( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, sub: &String, name: &UserName, email: &UserEmail, @@ -546,7 +546,7 @@ impl Postgres { async fn edit_user( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, id: &Uuid, name: &UserName, email: &UserEmail, @@ -592,7 +592,7 @@ impl Postgres { async fn delete_user_sessions( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, user_id: &Uuid, ) -> Result 
{ let rows_affected = sqlx::query( @@ -613,7 +613,7 @@ impl Postgres { async fn delete_user_from_database( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, user_id: &Uuid, ) -> Result { let user: User = sqlx::query_as( @@ -635,7 +635,7 @@ impl Postgres { async fn get_user_from_id( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, id: &Uuid, ) -> Result { let user: User = sqlx::query_as( @@ -657,7 +657,7 @@ impl Postgres { async fn get_user_from_email( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, email: &UserEmail, ) -> Result { let user: User = sqlx::query_as( @@ -698,7 +698,7 @@ impl Postgres { async fn create_session( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, user: &User, expiration: &SessionExpirationTime, ) -> anyhow::Result { @@ -721,7 +721,7 @@ impl Postgres { ) VALUES ( $1, $2, - TO_TIMESTAMP($3::double precision / 1000) + datetime($3, 'unixepoch') ) RETURNING * @@ -729,7 +729,7 @@ impl Postgres { ) .bind(session_id) .bind(user.id()) - .bind(expiration_time) + .bind(expiration_time / 1000) .fetch_one(&mut *tx) .await?; @@ -740,7 +740,7 @@ impl Postgres { async fn get_auth_session( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, session_id: &AuthSessionId, ) -> Result { let session: AuthSession = sqlx::query_as( @@ -762,7 +762,7 @@ impl Postgres { async fn get_user_warrens( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, user_id: &Uuid, ) -> Result, sqlx::Error> { let user_warrens: Vec = sqlx::query_as( @@ -784,7 +784,7 @@ impl Postgres { async fn get_all_user_warrens( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, ) -> Result, sqlx::Error> { let user_warrens: Vec = sqlx::query_as( " @@ -802,7 +802,7 @@ impl Postgres { async fn get_user_warren( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, user_id: &Uuid, warren_id: &Uuid, ) -> Result { @@ -825,7 +825,10 @@ impl Postgres { Ok(ids) } - async fn fetch_users(&self, connection: &mut PgConnection) -> Result, sqlx::Error> { + async fn fetch_users( + &self, + connection: &mut SqliteConnection, + ) -> Result, sqlx::Error> { let users: Vec = sqlx::query_as( " SELECT @@ -844,7 +847,7 @@ impl Postgres { async fn add_user_to_warren( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, user_warren: &UserWarren, ) -> Result { let user_warren: UserWarren = sqlx::query_as( @@ -855,14 +858,22 @@ impl Postgres { can_list_files, can_read_files, can_modify_files, - can_delete_files + can_delete_files, + can_list_shares, + can_create_shares, + can_modify_shares, + can_delete_shares ) VALUES ( $1, $2, $3, $4, $5, - $6 + $6, + $7, + $8, + $9, + $10 ) RETURNING * @@ -874,6 +885,10 @@ impl Postgres { .bind(user_warren.can_read_files()) .bind(user_warren.can_modify_files()) .bind(user_warren.can_delete_files()) + .bind(user_warren.can_list_shares()) + .bind(user_warren.can_create_shares()) + .bind(user_warren.can_modify_shares()) + .bind(user_warren.can_delete_shares()) .fetch_one(connection) .await?; @@ -882,7 +897,7 @@ impl Postgres { async fn update_user_warren( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, user_warren: &UserWarren, ) -> Result { let user_warren: UserWarren = sqlx::query_as( @@ -923,7 +938,7 @@ impl Postgres { async fn remove_user_from_warren( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, user_id: &Uuid, warren_id: 
&Uuid, ) -> Result { diff --git a/backend/src/lib/outbound/sqlite/mod.rs b/backend/src/lib/outbound/sqlite/mod.rs new file mode 100644 index 0000000..542f313 --- /dev/null +++ b/backend/src/lib/outbound/sqlite/mod.rs @@ -0,0 +1,74 @@ +use std::{str::FromStr as _, time::Duration}; + +use sqlx::{ + ConnectOptions as _, SqlitePool, + sqlite::{SqliteConnectOptions, SqlitePoolOptions}, +}; +use tokio::task::JoinHandle; +pub mod auth; +pub mod share; +pub mod warrens; + +#[derive(Debug, Clone)] +pub struct SqliteConfig { + database_url: String, +} + +impl SqliteConfig { + pub fn new(database_url: String) -> Self { + Self { database_url } + } +} + +#[derive(Debug, Clone)] +pub struct Sqlite { + pool: SqlitePool, +} + +impl Sqlite { + pub async fn new(config: SqliteConfig) -> anyhow::Result { + let opts = SqliteConnectOptions::from_str(&config.database_url)? + .create_if_missing(true) + .extension_with_entrypoint( + "/var/lib/warren/sqlite_extensions/uuid", + "sqlite3_uuid_init", + ) + .disable_statement_logging(); + + let pool = SqlitePoolOptions::new().connect_with(opts).await?; + sqlx::migrate!("./migrations").run(&pool).await?; + + // 3600 seconds = 1 hour + Self::start_cleanup_tasks(pool.clone(), Duration::from_secs(3600)); + + Ok(Self { pool }) + } + + pub(super) fn start_cleanup_tasks(pool: SqlitePool, interval: Duration) -> JoinHandle<()> { + tokio::spawn(async move { + loop { + { + let Ok(mut connection) = pool.acquire().await else { + break; + }; + + if let Ok(count) = Self::delete_expired_auth_sessions(&mut connection).await { + tracing::debug!("Removed {count} expired auth session(s)"); + } + + if let Ok(count) = Self::delete_expired_shares(&mut connection).await { + tracing::debug!("Deleted {count} expired share(s)"); + } + } + + tokio::time::sleep(interval).await; + } + + tracing::debug!("Session cleanup task stopped"); + }) + } +} + +pub(super) fn is_not_found_error(err: &sqlx::Error) -> bool { + matches!(err, sqlx::Error::RowNotFound) +} diff --git a/backend/src/lib/outbound/postgres/share.rs b/backend/src/lib/outbound/sqlite/share.rs similarity index 92% rename from backend/src/lib/outbound/postgres/share.rs rename to backend/src/lib/outbound/sqlite/share.rs index 9f7bb08..ed30116 100644 --- a/backend/src/lib/outbound/postgres/share.rs +++ b/backend/src/lib/outbound/sqlite/share.rs @@ -1,10 +1,10 @@ use anyhow::anyhow; use argon2::{ Argon2, PasswordHash, PasswordVerifier as _, - password_hash::{PasswordHasher as _, SaltString, rand_core::OsRng}, + password_hash::{PasswordHasher as _, SaltString}, }; use chrono::{NaiveDateTime, Utc}; -use sqlx::{Acquire as _, PgConnection}; +use sqlx::{Acquire as _, SqliteConnection}; use thiserror::Error; use uuid::Uuid; @@ -17,7 +17,7 @@ use crate::domain::warren::models::{ warren::HasWarrenId as _, }; -use super::{Postgres, is_not_found_error}; +use super::{Sqlite, is_not_found_error}; #[derive(sqlx::FromRow)] struct ShareRow { @@ -62,7 +62,7 @@ impl TryFrom for Share { } pub(super) async fn get_share( - connection: &mut PgConnection, + connection: &mut SqliteConnection, request: GetShareRequest, ) -> anyhow::Result { let share_row: ShareRow = sqlx::query_as( @@ -90,7 +90,7 @@ pub(super) async fn get_share( } pub(super) async fn list_shares( - connection: &mut PgConnection, + connection: &mut SqliteConnection, request: ListSharesRequest, ) -> anyhow::Result> { let share_rows: Vec = sqlx::query_as( @@ -126,13 +126,13 @@ pub(super) async fn list_shares( } pub(super) async fn create_share( - connection: &mut PgConnection, + connection: &mut 
SqliteConnection, request: CreateShareRequest, ) -> anyhow::Result { let mut tx = connection.begin().await?; let password_hash = if let Some(password) = request.base().password() { - let salt = SaltString::generate(&mut OsRng); + let salt = SaltString::generate(&mut argon2::password_hash::rand_core::OsRng); let argon2 = Argon2::default(); Some( @@ -164,7 +164,7 @@ pub(super) async fn create_share( $2, $3, $4, - TO_TIMESTAMP($5::double precision / 1000) + datetime($5, 'unixepoch') ) RETURNING * @@ -174,7 +174,7 @@ pub(super) async fn create_share( .bind(request.warren_id()) .bind(request.base().path()) .bind(password_hash) - .bind(expires_at) + .bind(expires_at.map(|v| v / 1000)) .fetch_one(&mut *tx) .await?; @@ -184,7 +184,7 @@ pub(super) async fn create_share( } pub(super) async fn delete_share( - connection: &mut PgConnection, + connection: &mut SqliteConnection, request: DeleteShareRequest, ) -> anyhow::Result { let mut tx = connection.begin().await?; @@ -209,7 +209,7 @@ pub(super) async fn delete_share( } pub(super) async fn verify_password( - connection: &mut PgConnection, + connection: &mut SqliteConnection, request: VerifySharePasswordRequest, ) -> Result { let share_row: ShareRow = sqlx::query_as( @@ -264,9 +264,9 @@ pub(super) async fn verify_password( } } -impl Postgres { +impl Sqlite { pub(super) async fn delete_expired_shares( - connection: &mut PgConnection, + connection: &mut SqliteConnection, ) -> Result { let delete_count = sqlx::query( " diff --git a/backend/src/lib/outbound/postgres/warrens.rs b/backend/src/lib/outbound/sqlite/warrens.rs similarity index 91% rename from backend/src/lib/outbound/postgres/warrens.rs rename to backend/src/lib/outbound/sqlite/warrens.rs index 2fc14bd..5001ce0 100644 --- a/backend/src/lib/outbound/postgres/warrens.rs +++ b/backend/src/lib/outbound/sqlite/warrens.rs @@ -1,5 +1,5 @@ use anyhow::{Context as _, anyhow}; -use sqlx::{Acquire as _, PgConnection}; +use sqlx::{Acquire as _, SqliteConnection}; use uuid::Uuid; use crate::domain::warren::{ @@ -21,9 +21,9 @@ use crate::domain::warren::{ ports::WarrenRepository, }; -use super::{Postgres, is_not_found_error}; +use super::{Sqlite, is_not_found_error}; -impl WarrenRepository for Postgres { +impl WarrenRepository for Sqlite { async fn create_warren( &self, request: CreateWarrenRequest, @@ -220,10 +220,10 @@ impl WarrenRepository for Postgres { } } -impl Postgres { +impl Sqlite { async fn create_warren( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, name: &WarrenName, path: &AbsoluteFilePath, ) -> Result { @@ -254,7 +254,7 @@ impl Postgres { async fn edit_warren( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, id: &Uuid, name: &WarrenName, path: &AbsoluteFilePath, @@ -287,7 +287,7 @@ impl Postgres { async fn delete_warren( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, id: &Uuid, ) -> Result { let mut tx = connection.begin().await?; @@ -313,7 +313,7 @@ impl Postgres { async fn get_warren( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, id: &Uuid, ) -> Result { let warren: Warren = sqlx::query_as( @@ -335,20 +335,28 @@ impl Postgres { async fn fetch_warrens( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, ids: &[Uuid], ) -> Result, sqlx::Error> { - let warrens: Vec = sqlx::query_as::( + let mut ids_as_string = ids.into_iter().fold(String::new(), |mut acc, id| { + let encoded = hex::encode(id.as_bytes()); + acc.push_str("x'"); + 
acc.push_str(encoded.as_str()); + acc.push_str("',"); + acc + }); + ids_as_string.pop(); + + let warrens: Vec = sqlx::query_as::(&format!( " SELECT * FROM warrens WHERE - id = ANY($1) + id IN ({ids_as_string}) ", - ) - .bind(ids) + )) .fetch_all(&mut *connection) .await?; @@ -357,9 +365,9 @@ impl Postgres { async fn fetch_all_warrens( &self, - connection: &mut PgConnection, + connection: &mut SqliteConnection, ) -> Result, sqlx::Error> { - let warrens: Vec = sqlx::query_as::( + let warrens: Vec = sqlx::query_as::( " SELECT * diff --git a/backend/warren.db b/backend/warren.db new file mode 100644 index 0000000..4fd3133 Binary files /dev/null and b/backend/warren.db differ diff --git a/compose.yaml b/compose.yaml index 86a1620..1aae5ed 100644 --- a/compose.yaml +++ b/compose.yaml @@ -1,7 +1,5 @@ services: warren: - depends_on: - - 'postgres' image: 'warren:latest' container_name: 'warren' build: '.' @@ -13,26 +11,15 @@ services: environment: - 'SERVER_ADDRESS=0.0.0.0' - 'SERVER_PORT=8080' - - 'DATABASE_URL=postgres://postgres:pg@warren-postgres:5432' - - 'DATABASE_NAME=warren' + - 'DATABASE_URL=sqlite:///var/lib/warren/warren.db' - 'SERVE_DIRECTORY=/serve' - 'CORS_ALLOW_ORIGIN=http://localhost:8081' - 'LOG_LEVEL=debug' - 'MAX_FILE_FETCH_BYTES=10737418240' + - 'ZIP_READ_BUFFER_BYTES=4096' volumes: - './backend/serve:/serve:rw' - postgres: - image: 'postgres:17' - container_name: 'warren-db' - hostname: 'warren-postgres' - networks: - - 'warren-net' - volumes: - - './postgres-data:/var/lib/postgresql/data' - environment: - - 'POSTGRES_PASSWORD=pg' - ports: - - '5432:5432/tcp' + - './backend/warren.db:/var/lib/warren/warren.db:rw' networks: warren-net: name: 'warren-net' diff --git a/frontend/components/SelectionRect.vue b/frontend/components/SelectionRect.vue index 8765f96..4d1f039 100644 --- a/frontend/components/SelectionRect.vue +++ b/frontend/components/SelectionRect.vue @@ -8,7 +8,7 @@ const width = computed(() => Math.abs(rect.a.x - rect.b.x)); const height = computed(() => Math.abs(rect.a.y - rect.b.y)); function onDocumentPointerDown(e: MouseEvent) { - if (e.button !== 0) { + if (e.button !== 0 || matchMedia('(pointer:coarse)').matches) { return; } @@ -21,7 +21,7 @@ function onDocumentPointerDown(e: MouseEvent) { } function onDocumentPointerMove(e: MouseEvent) { - if (!rect.enabled) { + if (!rect.enabled || matchMedia('(pointer:coarse)').matches) { return; } @@ -39,7 +39,11 @@ function onDocumentPointerMove(e: MouseEvent) { } function onDocumentPointerUp(e: MouseEvent) { - if (e.button !== 0 || !rect.enabled) { + if ( + !rect.enabled || + e.button !== 0 || + matchMedia('(pointer:coarse)').matches + ) { return; } diff --git a/frontend/components/admin/AddUserWarrenDialog.vue b/frontend/components/admin/AddUserWarrenDialog.vue index 103626a..4130e6d 100644 --- a/frontend/components/admin/AddUserWarrenDialog.vue +++ b/frontend/components/admin/AddUserWarrenDialog.vue @@ -51,6 +51,11 @@ const form = useForm({ canReadFiles: false, canModifyFiles: false, canDeleteFiles: false, + + canListShares: false, + canCreateShares: false, + canModifyShares: false, + canDeleteShares: false, }, }); @@ -231,6 +236,70 @@ const onSubmit = form.handleSubmit(async (values) => { + + + + List shares + + + + + + + + + + Create shares + + + + + + + + + + Modify shares + + + + + + + + + + Delete shares + + + + + + diff --git a/frontend/lib/schemas/admin.ts b/frontend/lib/schemas/admin.ts index d802493..3067124 100644 --- a/frontend/lib/schemas/admin.ts +++ b/frontend/lib/schemas/admin.ts @@ -25,6 +25,11 @@ export 
const userWarrenSchema = object({
   canReadFiles: boolean().required(),
   canModifyFiles: boolean().required(),
   canDeleteFiles: boolean().required(),
+
+  canListShares: boolean().required(),
+  canCreateShares: boolean().required(),
+  canModifyShares: boolean().required(),
+  canDeleteShares: boolean().required(),
 });
 
 export const createWarrenSchema = object({
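
Note: a minimal, illustrative sketch of the bundled uuid extension in use, for example from the sqlite3 CLI against the new warren.db; the init migration relies on uuid_blob(uuid()) as the DEFAULT for its BLOB primary keys. Table and column names come from 20250906174941_init.sql; the CLI session itself is an assumption for demonstration, not part of the change.

    .load ./sqlite_extensions/uuid sqlite3_uuid_init
    -- uuid() returns a random version-4 UUID as 36-character text
    SELECT uuid();
    -- uuid_blob(X) converts a UUID (text or 16-byte blob) into the 16-byte blob stored in the id columns
    SELECT uuid_blob(uuid());
    -- uuid_str(X) renders a stored 16-byte id back to canonical RFC-4122 text
    SELECT uuid_str(id) FROM users LIMIT 1;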