Initial commit

2024-12-29 12:20:01 +01:00
commit 9ebe9b55bd
87 changed files with 21545 additions and 0 deletions

120
backend/.vscode/launch.json vendored Normal file

@@ -0,0 +1,120 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"type": "lldb",
"request": "launch",
"name": "Debug executable 'treasure_chest'",
"cargo": {
"args": [
"build",
"--bin=treasure_chest",
"--package=treasure_chest"
],
"filter": {
"name": "treasure_chest",
"kind": "bin"
}
},
"args": [],
"cwd": "${workspaceFolder}"
},
{
"type": "lldb",
"request": "launch",
"name": "Debug unit tests in executable 'treasure_chest'",
"cargo": {
"args": [
"test",
"--no-run",
"--bin=treasure_chest",
"--package=treasure_chest"
],
"filter": {
"name": "treasure_chest",
"kind": "bin"
}
},
"args": [],
"cwd": "${workspaceFolder}"
},
{
"type": "lldb",
"request": "launch",
"name": "Debug unit tests in library 'entity'",
"cargo": {
"args": [
"test",
"--no-run",
"--lib",
"--package=entity"
],
"filter": {
"name": "entity",
"kind": "lib"
}
},
"args": [],
"cwd": "${workspaceFolder}"
},
{
"type": "lldb",
"request": "launch",
"name": "Debug unit tests in library 'migration'",
"cargo": {
"args": [
"test",
"--no-run",
"--lib",
"--package=migration"
],
"filter": {
"name": "migration",
"kind": "lib"
}
},
"args": [],
"cwd": "${workspaceFolder}"
},
{
"type": "lldb",
"request": "launch",
"name": "Debug executable 'migration'",
"cargo": {
"args": [
"build",
"--bin=migration",
"--package=migration"
],
"filter": {
"name": "migration",
"kind": "bin"
}
},
"args": [],
"cwd": "${workspaceFolder}"
},
{
"type": "lldb",
"request": "launch",
"name": "Debug unit tests in executable 'migration'",
"cargo": {
"args": [
"test",
"--no-run",
"--bin=migration",
"--package=migration"
],
"filter": {
"name": "migration",
"kind": "bin"
}
},
"args": [],
"cwd": "${workspaceFolder}"
}
]
}

4025
backend/Cargo.lock generated Normal file

File diff suppressed because it is too large

33
backend/Cargo.toml Normal file

@@ -0,0 +1,33 @@
[package]
name = "treasure_chest"
version = "0.1.0"
edition = "2024"
[workspace]
members = [".", "entity", "migration"]
[dependencies]
axum = "0.8.7"
argon2 = "0.5.3"
base64 = "0.22.1"
chacha20poly1305 = "0.10.1"
chrono = "0.4.42"
config = "0.15.19"
entity = { path = "entity" }
env_logger = "0.11.8"
futures = "0.3"
log = "0.4.28"
migration = { path = "migration" }
regex = "1.12.2"
sea-orm = { version = "1.1.19", features = [
"macros",
"runtime-tokio-rustls",
"sqlx-mysql",
"with-chrono",
"with-uuid",
] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio = { version = "1", features = ["full"] }
tokio-util = { version = "0.7.17", features = ["io"] }
uuid = { version = "1.18.1", features = ["v4"] }

18
backend/config.json Normal file

@@ -0,0 +1,18 @@
{
// Connection string of the database that is used
"ConnectionString": "mysql://root:example@localhost/treasure_chest",
// Interface & port that will be used
"BindTo": "localhost:8000",
// Path of uploaded, encrypted files
"FilePath": "./files",
// Max download tries for a file (by all IPs)
"MaxDownloadTries": 3,
// Default lifetime (in days) of not yet downloaded, encrypted files
"DaysFileAvailable": 7,
// Max number of files that can be uploaded by a single IP in a day
"UserUploadsPerDay": 5,
// Name of the header that will be used to indicate a request's IP. Be sure to configure your proxy server accordingly!
"IpHeaderName": "X-Forwarded-For",
// Max (unencrypted) file size in bytes. Mind that, during an upload request, both the unencrypted and the encrypted file are held in memory, so a request needs roughly {BodyMaxSize} * 2 of memory.
"BodyMaxSize": 10000000
}

1
backend/development/.gitignore vendored Normal file

@@ -0,0 +1 @@
TESTFILE

13
backend/development/README.md Normal file

@@ -0,0 +1,13 @@
# Debug memory allocation
## Build test file
- 100 MB file: `dd if=/dev/random of=TESTFILE status=progress bs=1m count=100`
## Steps
1. `cargo install --features vendored-openssl cargo-instruments`
Installs _cargo-instruments_ without needing OpenSSL (macOS shenanigans -.-)
2. `cargo instruments -t Allocations (--release)`
3. Find PID
4. `kill -SIGINT [PID]` to stop gracefully without interrupting cargo-instruments.
5. Xcode instrument _Allocations_ opens up automatically

11
backend/entity/Cargo.toml Normal file

@@ -0,0 +1,11 @@
[package]
name = "entity"
version = "0.1.0"
edition = "2024"
[lib]
name = "entity"
path = "mod.rs"
[dependencies]
sea-orm = { version = "1.1.19" }

35
backend/entity/access_log.rs Normal file

@@ -0,0 +1,35 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)]
#[sea_orm(table_name = "access_log")]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false, column_type = "Binary(16)")]
pub id: Vec<u8>,
pub ip: String,
#[sea_orm(column_type = "Binary(16)")]
pub file_id: Vec<u8>,
pub date_time: DateTime,
pub successful: i8,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(
belongs_to = "super::file::Entity",
from = "Column::FileId",
to = "super::file::Column::Id",
on_update = "Cascade",
on_delete = "Cascade"
)]
File,
}
impl Related<super::file::Entity> for Entity {
fn to() -> RelationDef {
Relation::File.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

31
backend/entity/file.rs Normal file

@@ -0,0 +1,31 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0
use sea_orm::entity::prelude::*;
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq)]
#[sea_orm(table_name = "file")]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false, column_type = "Binary(16)")]
pub id: Vec<u8>,
#[sea_orm(unique)]
pub hash: String,
pub uploader_ip: String,
pub uploaded_at: DateTime,
pub download_until: DateTime,
#[sea_orm(column_type = "Binary(255)")]
pub encrypted_metadata: Vec<u8>,
}
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {
#[sea_orm(has_many = "super::access_log::Entity")]
AccessLog,
}
impl Related<super::access_log::Entity> for Entity {
fn to() -> RelationDef {
Relation::AccessLog.def()
}
}
impl ActiveModelBehavior for ActiveModel {}

8
backend/entity/mod.rs Normal file

@@ -0,0 +1,8 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0
pub mod prelude;
pub mod access_log;
pub mod file;
pub use prelude::*;

4
backend/entity/prelude.rs Normal file

@@ -0,0 +1,4 @@
//! `SeaORM` Entity, @generated by sea-orm-codegen 1.1.0
pub use super::access_log::Entity as AccessLog;
pub use super::file::Entity as File;

16
backend/migration/Cargo.toml Normal file

@@ -0,0 +1,16 @@
[package]
name = "migration"
version = "0.1.0"
edition = "2024"
publish = false
[lib]
name = "migration"
path = "src/lib.rs"
[dependencies]
async-std = { version = "1", features = ["attributes", "tokio1"] }
[dependencies.sea-orm-migration]
version = "1.1.19"
features = ["sqlx-mysql", "runtime-tokio-rustls"]

12
backend/migration/src/lib.rs Normal file

@@ -0,0 +1,12 @@
pub use sea_orm_migration::prelude::*;
mod m20250114_200507_create_tables;
pub struct Migrator;
#[async_trait::async_trait]
impl MigratorTrait for Migrator {
fn migrations() -> Vec<Box<dyn MigrationTrait>> {
vec![Box::new(m20250114_200507_create_tables::Migration)]
}
}

98
backend/migration/src/m20250114_200507_create_tables.rs Normal file

@@ -0,0 +1,98 @@
use sea_orm_migration::{
prelude::*,
schema::{blob, boolean, date_time, string, uuid},
};
#[derive(DeriveMigrationName)]
pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.create_table(
Table::create()
.table(File::Table)
.if_not_exists()
.col(uuid(File::Id).not_null().primary_key())
.col(string(File::Hash).not_null().unique_key())
.col(string(File::UploaderIp).not_null())
.col(date_time(File::UploadedAt).not_null())
.col(date_time(File::DownloadUntil).not_null())
.col(blob(File::EncryptedMetadata).not_null())
.to_owned(),
)
.await?;
manager
.create_table(
Table::create()
.table(AccessLog::Table)
.if_not_exists()
.col(uuid(AccessLog::Id).not_null().primary_key())
.col(string(AccessLog::Ip).not_null())
.col(uuid(AccessLog::FileId).not_null())
.col(date_time(AccessLog::DateTime).not_null())
.col(boolean(AccessLog::Successful).not_null())
.to_owned(),
)
.await?;
manager
.create_foreign_key(
ForeignKey::create()
.name("access_log_file")
.from(AccessLog::Table, AccessLog::FileId)
.to(File::Table, File::Id)
.on_delete(ForeignKeyAction::Cascade)
.on_update(ForeignKeyAction::Cascade)
.to_owned(),
)
.await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
manager
.drop_foreign_key(ForeignKey::drop().name("access_log_file").to_owned())
.await?;
manager
.drop_table(Table::drop().table(File::Table).to_owned())
.await?;
manager
.drop_table(Table::drop().table(AccessLog::Table).to_owned())
.await?;
Ok(())
}
}
#[derive(DeriveIden)]
pub enum File {
Table,
Id,
Hash,
#[sea_orm(iden = "uploaded_at")]
UploadedAt,
#[sea_orm(iden = "uploader_ip")]
UploaderIp,
#[sea_orm(iden = "download_until")]
DownloadUntil,
#[sea_orm(iden = "encrypted_metadata")]
EncryptedMetadata,
}
#[derive(DeriveIden)]
enum AccessLog {
Table,
Id,
Ip,
#[sea_orm(iden = "file_id")]
FileId,
#[sea_orm(iden = "date_time")]
DateTime,
Successful,
}

6
backend/migration/src/main.rs Normal file

@@ -0,0 +1,6 @@
use sea_orm_migration::prelude::*;
#[async_std::main]
async fn main() {
cli::run_cli(migration::Migrator).await;
}

12
backend/src/api/mod.rs Normal file

@@ -0,0 +1,12 @@
//! API module.
//!
//! This module contains the routes and server setup for the API. It includes
//! submodules for configuration, download, and upload routes, as well as the
//! server initialization.
mod routes {
pub mod configuration;
pub mod download;
pub mod upload;
}
mod server;
pub use server::listen;

30
backend/src/api/routes/configuration.rs Normal file

@@ -0,0 +1,30 @@
use crate::configuration::CONFIGURATION;
use axum::response::IntoResponse;
use axum::Json;
use serde::Serialize;
/// A struct representing the configuration response.
///
/// This struct is used to serialize the configuration settings into a JSON
/// response. The fields are renamed to match expected JSON keys.
/// This response is returned in the [`handler`] function.
#[derive(Serialize)]
pub struct Response {
#[serde(rename = "BodyMaxSize")]
pub body_max_size: usize,
#[serde(rename = "DaysFileAvailable")]
pub default_days_lifetime: u64,
}
/// Configuration endpoint.
///
/// This function creates a `Response` struct with the current configuration
/// settings and returns it as a JSON.
pub async fn handler() -> impl IntoResponse {
let response = Response {
body_max_size: CONFIGURATION.body_max_size,
default_days_lifetime: CONFIGURATION.days_file_available,
};
Json(response)
}
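For orientation, a hypothetical test sketching the JSON shape those serde renames produce; only `Response` and serde_json come from this repo, the module and values are illustrative:

```rust
#[cfg(test)]
mod shape_tests {
    use super::Response;

    #[test]
    fn serializes_with_config_style_keys() {
        // Keys follow the #[serde(rename = "...")] attributes above.
        let response = Response {
            body_max_size: 10_000_000,
            default_days_lifetime: 7,
        };
        assert_eq!(
            serde_json::to_string(&response).unwrap(),
            r#"{"BodyMaxSize":10000000,"DaysFileAvailable":7}"#
        );
    }
}
```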

88
backend/src/api/routes/download.rs Normal file

@@ -0,0 +1,88 @@
use crate::database;
use crate::encryption;
use crate::encryption::Encoding;
use crate::encryption::Encryption;
use crate::error::Error;
use crate::file;
use crate::request;
use crate::return_logged;
use crate::util;
use axum::extract::{Path, State};
use axum::http::HeaderMap;
use axum::response::IntoResponse;
use axum::{http::StatusCode, Json};
use sea_orm::DatabaseConnection;
use serde::Deserialize;
use uuid::Uuid;
/// A struct representing the request body for the download endpoint.
///
/// This struct is used to deserialize the JSON request body containing the
/// key needed to decrypt the requested file.
#[derive(Deserialize)]
pub struct RequestBody {
pub key: String,
}
/// Handles the file download endpoint.
///
/// This function processes the download request, validates the key, logs the
/// access, decrypts the file, and returns the file content along with the
/// appropriate headers.
pub async fn handler(
State(database_connection): State<DatabaseConnection>,
id: Path<Uuid>,
headers: HeaderMap,
body: Json<RequestBody>,
) -> impl IntoResponse {
let request_ip = match request::get_request_ip(&headers) {
Ok(ip) => ip,
Err(error) => return_logged!(error, StatusCode::BAD_GATEWAY),
};
let file = match database::get_downloadable_file(&database_connection, &id).await {
Ok(None) => return Err(StatusCode::NOT_FOUND),
Ok(Some(file)) => file,
Err(error) => return_logged!(error, StatusCode::INTERNAL_SERVER_ERROR),
};
let Ok(key) = util::get_validated_key(&body.key, &file.hash) else {
if let Err(error) =
database::store_access_log(&database_connection, &request_ip, &id, false).await
{
return_logged!(error, StatusCode::INTERNAL_SERVER_ERROR);
}
return Err(StatusCode::UNAUTHORIZED);
};
if let Err(error) =
database::store_access_log(&database_connection, &request_ip, &id, true).await
{
return_logged!(error, StatusCode::INTERNAL_SERVER_ERROR)
}
let content = match file::load_data(&id)
.and_then(encryption::Data::decode)
.and_then(|data| data.decrypt(&key))
{
Ok(content) => content,
Err(error) => return_logged!(error, StatusCode::INTERNAL_SERVER_ERROR),
};
let response_headers = match encryption::Data::decode(file.encrypted_metadata)
.and_then(|data| data.decrypt(&key))
.and_then(|data| String::from_utf8(data).map_err(|_| Error::DecryptionFailed))
.and_then(|json| {
serde_json::from_str::<file::Metadata>(&json).map_err(Error::JsonSerializationFailed)
}) {
Ok(metadata) => metadata.into(),
_ => HeaderMap::new(),
};
if let Err(error) = file::delete(&id) {
log::error!("Could not delete used file {}: {error:?}", id.to_string());
}
Ok((response_headers, content))
}
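For orientation, a hypothetical client for this endpoint. It assumes the reqwest crate with its json feature (not a dependency of this backend), a server on localhost:8000, the route from server.rs, and a placeholder id/key pair returned by the upload endpoint:

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let id = "00000000-0000-0000-0000-000000000000"; // placeholder: id from the upload response
    let key = "<base64 key from the upload response>"; // placeholder
    let response = reqwest::Client::new()
        .post(format!("http://localhost:8000/api/files/{id}/download"))
        .header("X-Forwarded-For", "127.0.0.1") // normally set by the reverse proxy
        .json(&json!({ "key": key }))
        .send()
        .await?;
    // On success the body is the decrypted file and the headers carry the
    // decrypted metadata; afterwards the file is gone (one-time download).
    let status = response.status();
    let content = response.bytes().await?;
    println!("status: {status}, {} bytes", content.len());
    Ok(())
}
```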

129
backend/src/api/routes/upload.rs Normal file

@@ -0,0 +1,129 @@
use crate::configuration::CONFIGURATION;
use crate::encryption::{Encoding, Encryption};
use crate::error::Error;
use crate::file;
use crate::hash::{Hash, Hashing};
use crate::request;
use crate::return_logged;
use crate::{database, encryption};
use axum::extract::State;
use axum::http::HeaderMap;
use axum::response::IntoResponse;
use axum::{extract::Request, http::StatusCode, Json};
use base64::prelude::BASE64_URL_SAFE;
use base64::Engine;
use futures::TryStreamExt;
use sea_orm::DatabaseConnection;
use serde::Serialize;
use std::io::{Error as IoError, ErrorKind};
use tokio::io::AsyncReadExt;
use tokio_util::io::StreamReader;
use uuid::Uuid;
/// A struct representing the response for the upload endpoint.
///
/// This struct is used to serialize the response containing the file id and
/// the encryption key.
#[derive(Serialize)]
pub struct Response {
pub id: String,
pub key: String,
}
/// Handles the file upload endpoint.
///
/// This function processes the upload request, validates the request, stores
/// the file, and returns the file id and encryption key.
pub async fn handler(
State(database_connection): State<DatabaseConnection>,
headers: HeaderMap,
request: Request,
) -> impl IntoResponse {
let request_ip = match request::get_request_ip(&headers) {
Ok(ip) => ip,
Err(error) => return_logged!(error, StatusCode::BAD_GATEWAY),
};
match database::is_upload_limit_reached(&database_connection, &request_ip).await {
Ok(false) => (),
Ok(true) => return Err(StatusCode::TOO_MANY_REQUESTS),
Err(error) => return_logged!(error, StatusCode::INTERNAL_SERVER_ERROR),
}
let content = match extract_body(request).await {
Ok(content) => content,
Err(_) => return Err(StatusCode::PAYLOAD_TOO_LARGE),
};
let (encryption_data, key) = match encryption::Data::encrypt(content) {
Ok(result) => result,
Err(error) => return_logged!(error, StatusCode::INTERNAL_SERVER_ERROR),
};
let id = Uuid::new_v4();
if let Err(error) = file::store_data(&id, encryption_data.encode()) {
return_logged!(error, StatusCode::INTERNAL_SERVER_ERROR);
};
let encrypted_metadata =
match serde_json::to_string(&std::convert::Into::<file::Metadata>::into(headers))
.map_err(Error::JsonSerializationFailed)
.and_then(|json| encryption::Data::encrypt_with_key(json.bytes(), &key))
.map(encryption::definitions::Encoding::encode)
{
Ok(metadata) => metadata,
Err(error) => return_logged!(error, StatusCode::INTERNAL_SERVER_ERROR),
};
if encrypted_metadata.len() > 255 {
return Err(StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE);
}
let hash = match Hash::hash(&key) {
Ok(hash) => hash,
Err(error) => return_logged!(error, StatusCode::INTERNAL_SERVER_ERROR),
};
if let Err(error) = database::store_file(
&database_connection,
&id,
hash,
request_ip,
encrypted_metadata,
)
.await
{
return_logged!(error, StatusCode::INTERNAL_SERVER_ERROR);
};
Ok(Json(Response {
id: id.into(),
key: BASE64_URL_SAFE.encode(&key),
}))
}
async fn extract_body(request: Request) -> Result<Vec<u8>, IoError> {
let mut body = vec![];
let body_data_stream = request
.into_body()
.into_data_stream()
.map_err(|err| IoError::new(ErrorKind::Other, err));
/* Limit the reader to one byte more than the max size: if that extra byte
* gets filled, we know the body is too large. Note that `AsyncReadExt::take`
* caps the number of bytes read, whereas `StreamExt::take` on the data
* stream would only cap the number of chunks. */
let body_reader = StreamReader::new(body_data_stream).take(CONFIGURATION.body_max_size as u64 + 1);
futures::pin_mut!(body_reader);
tokio::io::copy(&mut body_reader, &mut body).await?;
if body.len() > CONFIGURATION.body_max_size {
return Err(IoError::new(
ErrorKind::StorageFull,
"Max body size exceeded",
));
}
Ok(body)
}
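And a matching hypothetical upload client, under the same assumptions (reqwest with json feature, server on localhost:8000); the id/key fields mirror the `Response` struct above:

```rust
use serde::Deserialize;

// Mirrors the `Response` struct of the upload handler.
#[derive(Deserialize)]
struct UploadResponse {
    id: String,
    key: String,
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let uploaded: UploadResponse = reqwest::Client::new()
        .post("http://localhost:8000/api/files")
        // These two headers become the encrypted metadata.
        .header("Content-Disposition", "attachment; filename=\"hello.txt\"")
        .header("Content-Type", "text/plain")
        .header("X-Forwarded-For", "127.0.0.1") // normally set by the reverse proxy
        .body("hello world")
        .send()
        .await?
        .json()
        .await?;
    println!("id: {}, key: {}", uploaded.id, uploaded.key);
    Ok(())
}
```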

81
backend/src/api/server.rs Normal file

@@ -0,0 +1,81 @@
use super::routes;
use crate::configuration::CONFIGURATION;
use axum::{
routing::{get, post},
Router,
};
use sea_orm::DatabaseConnection;
use std::io::Result;
use tokio::{net::TcpListener, sync::broadcast};
/// Starts the server and listens for incoming connections.
///
/// This function sets up the routes for the API, binds the server to the
/// specified address, and starts listening for incoming connections. It also
/// handles graceful shutdown when a shutdown signal is received.
///
/// # Routes
/// See [`routes`] folder for all available routes.
///
/// # Arguments
///
/// * `connection` - A `DatabaseConnection` instance used to interact with the
/// database.
/// * `shutdown` - A `broadcast::Receiver<()>` used to receive shutdown signals.
///
/// # Returns
///
/// * [`Ok<()>`] on graceful shutdown
/// * [`Err<Error>`] on error
///
/// # Example
///
/// ```rust
/// use sea_orm::DatabaseConnection;
/// use tokio::{runtime::Runtime, sync::broadcast};
///
/// let connection = DatabaseConnection::default();
/// let (shutdown_sender, shutdown_receiver) = broadcast::channel(1);
///
/// let rt = Runtime::new().unwrap();
/// rt.block_on(async {
/// listen(connection, shutdown_receiver).await.unwrap();
/// });
/// ```
pub async fn listen(
connection: DatabaseConnection,
mut shutdown: broadcast::Receiver<()>,
) -> Result<()> {
let app = Router::new()
.route("/api/files", post(routes::upload::handler))
.route("/api/files/{id}/download", post(routes::download::handler))
.route("/api/configuration", get(routes::configuration::handler))
.with_state(connection);
let listener = TcpListener::bind(&CONFIGURATION.listening_address).await?;
axum::serve(listener, app)
.with_graceful_shutdown(async move {
let _ = shutdown.recv().await;
})
.await
}
/// Logs an error and returns a specified status.
///
/// This macro logs the provided error using the `log` crate and then returns
/// the specified status.
///
/// # Arguments
///
/// * `$error` - The error message to be logged.
/// * `$status` - The HTTP status code to be returned.
///
/// # Example
///
/// ```rust
/// return_logged!(some_error, StatusCode::INTERNAL_SERVER_ERROR);
/// ```
#[macro_export]
macro_rules! return_logged {
($error: expr, $status: expr) => {{
log::error!("{:?}", $error);
return Err($status);
}};
}

71
backend/src/cleanup.rs Normal file

@@ -0,0 +1,71 @@
use crate::{database, error::Result, file};
use sea_orm::DatabaseConnection;
use std::time::Duration;
use tokio::{select, time, sync::broadcast};
use uuid::Uuid;
/// The interval in seconds between each cleanup operation.
/// Currently set to 10 minutes.
const CLEANUP_INTERVAL_SECONDS: u64 = 10 * 60; /* 10 minutes */
/// Runs the cleanup process in a loop, until `shutdown` signal is received.
///
/// # Arguments
///
/// * `database_connection` - A connection to the database.
/// * `shutdown` - A broadcast receiver to listen for shutdown signal.
///
/// # Returns
///
/// * [`Ok<()>`] on successful cleanup process
/// * [`Err<Error>`] on error
pub async fn run(
database_connection: DatabaseConnection,
shutdown: broadcast::Receiver<()>,
) -> Result<()> {
loop {
let mut shutdown = shutdown.resubscribe();
select! {
_ = time::sleep(Duration::from_secs(CLEANUP_INTERVAL_SECONDS)) => (),
_ = shutdown.recv() => return Ok(()),
};
log::info!("Cleaning up outdating files...");
database::remove_undownloadable_files(&database_connection).await?;
delete_outdated_files(&database_connection).await?;
}
}
/// Deletes outdated files from the file system.
///
/// # Arguments
///
/// * `database_connection` - A connection to the database.
///
/// # Returns
///
/// * [`Ok<()>`] on successful cleanup
/// * [`Err<Error>`] on error
async fn delete_outdated_files(database_connection: &DatabaseConnection) -> Result<()> {
let downloadable_file_ids = database::get_downloadable_file_ids(database_connection).await?;
let stored_file_ids = file::get_stored_file_ids()?;
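/* Keep the ids of files that exist on disk but are no longer downloadable according to the database */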
let file_ids_to_delete = stored_file_ids
.iter()
.filter(|stored| {
downloadable_file_ids
.iter()
.all(|downloadable| &downloadable != stored)
})
.collect::<Vec<&Uuid>>();
for file_id in file_ids_to_delete {
file::delete(file_id)?;
log::info!("Deleted outdated file: {file_id}");
}
Ok(())
}

87
backend/src/configuration.rs Normal file

@@ -0,0 +1,87 @@
use chrono::Days;
use config::{Environment, File, FileFormat};
use serde::Deserialize;
use std::{path::PathBuf, process::exit, sync::LazyLock};
pub const CONFIG_FILE_NAME: &str = "config.json";
pub const CONFIG_ENV_PREFIX: &str = "TREASURE_CHEST";
pub static CONFIGURATION: LazyLock<Configuration> = LazyLock::new(build);
/// Configuration that can be read automatically from JSON / env,
/// containing only basic types. See [`Configuration`]
#[derive(Deserialize)]
struct RawConfiguration {
#[serde(rename = "ConnectionString")]
pub connection_string: String,
#[serde(rename = "BindTo")]
pub listening_address: String,
#[serde(rename = "FilePath")]
pub file_path: PathBuf,
#[serde(rename = "DaysFileAvailable")]
pub days_file_available: u64,
#[serde(rename = "UserUploadsPerDay")]
pub user_uploads_per_day: u32,
#[serde(rename = "MaxDownloadTries")]
pub max_download_tries: u32,
#[serde(rename = "IpHeaderName")]
pub ip_header_name: String,
#[serde(rename = "BodyMaxSize")]
pub body_max_size: usize,
}
/// Configuration of program
pub struct Configuration {
/// Database connection string
pub connection_string: String,
/// Address to listen to (e.g. "_localhost:8080_")
pub listening_address: String,
/// Path of encrypted files
pub file_path: PathBuf,
/// Lifetime of uploaded files until deletion
pub file_lifetime: Days,
/// Raw value of `file_lifetime`
pub days_file_available: u64,
/// Number of max uploads by a single IP (rate limiting)
pub ip_uploads_per_day: u32,
/// Number of max tries to access a file (in case of wrong keys etc)
pub max_download_tries: u32,
/// Name of IP header, set by proxy server
pub ip_header_name: String,
/// Max size of request body (in bytes)
pub body_max_size: usize,
}
/// Builds [`Configuration`] by configuration file and env vars
///
/// # Returns
///
/// Instance of [`Configuration`]
///
/// # Note
///
/// If the configuration is not buildable, the program terminates.
pub fn build() -> Configuration {
let Ok(raw) = config::Config::builder()
.add_source(File::new(CONFIG_FILE_NAME, FileFormat::Json).required(false))
.add_source(Environment::with_prefix(CONFIG_ENV_PREFIX))
.build()
.expect("Configuration is not buildable")
.try_deserialize::<RawConfiguration>()
else {
log::error!("Could not build configuration. Bye.");
exit(1);
};
Configuration {
connection_string: raw.connection_string,
listening_address: raw.listening_address,
file_path: raw.file_path,
file_lifetime: Days::new(raw.days_file_available),
days_file_available: raw.days_file_available,
max_download_tries: raw.max_download_tries,
ip_uploads_per_day: raw.user_uploads_per_day,
ip_header_name: raw.ip_header_name,
body_max_size: raw.body_max_size,
}
}
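A small sketch of the intended access pattern (the demo function is hypothetical; `CONFIGURATION` and its fields are from this file):

```rust
use crate::configuration::CONFIGURATION;

fn demo() {
    // The LazyLock runs build() on first access; later reads reuse the result.
    println!("listening on {}", CONFIGURATION.listening_address);
}
```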

283
backend/src/database.rs Normal file

@@ -0,0 +1,283 @@
use super::error::{Error, Result};
use crate::configuration::CONFIGURATION;
use chrono::{Days, Utc};
use migration::ExprTrait;
use sea_orm::sea_query::Query;
use sea_orm::{ColumnTrait, Condition, FromQueryResult};
use sea_orm::{DatabaseConnection, EntityTrait, QueryFilter, QuerySelect, Set};
use uuid::Uuid;
/// Wrapper for `COUNT(*)` queries
#[derive(FromQueryResult)]
struct CountResult {
count: i64,
}
/// Gets the file with the given id from the database, if it can currently be downloaded
///
/// Checks if file has already been downloaded and if it's still in time range.
///
/// # Arguments
///
/// * `database_connection` - [`DatabaseConnection`] to use
/// * `id` - Id of the file entry
///
/// # Returns
///
/// * [`Ok<Some<Model>>`] containing downloadable file model
/// * [`Ok<None>`] on file not existing or outdated
/// * [`Err<Error>`] on error
pub async fn get_downloadable_file(
database_connection: &DatabaseConnection,
id: &Uuid,
) -> Result<Option<entity::file::Model>> {
entity::File::find()
.filter(entity::file::Column::Id.eq(*id))
.filter(entity::file::Column::DownloadUntil.gte(Utc::now()))
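/* Exclude files that have already been downloaded successfully */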
.filter(
entity::file::Column::Id.not_in_subquery(
Query::select()
.column(entity::access_log::Column::FileId)
.from(entity::access_log::Entity)
.cond_where(Condition::all().add(entity::access_log::Column::Successful.eq(1)))
.to_owned(),
),
)
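/* Exclude files whose download attempts already reached the configured maximum */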
.filter(
entity::file::Column::Id.not_in_subquery(
Query::select()
.column(entity::access_log::Column::FileId)
.from(entity::access_log::Entity)
.group_by_col(entity::access_log::Column::FileId)
.cond_having(
Condition::all().add(
entity::access_log::Column::FileId
.count()
.gte(CONFIGURATION.max_download_tries),
),
)
.to_owned(),
),
)
.one(database_connection)
.await
.map_err(Error::DatabaseOperationFailed)
}
/// Gets all file ids from database that can currently be downloaded
///
/// Checks if file has already been downloaded and if it's still in time range.
///
/// # Arguments
///
/// * `database_connection` - [`DatabaseConnection`] to use
///
/// # Returns
///
/// * [`Ok<Vec<Uuid>>`] containing ids of all files that can still be downloaded
/// * [`Err<Error>`] on error
pub async fn get_downloadable_file_ids(
database_connection: &DatabaseConnection,
) -> Result<Vec<Uuid>> {
entity::File::find()
.filter(entity::file::Column::DownloadUntil.gte(Utc::now()))
.filter(
entity::file::Column::Id.not_in_subquery(
Query::select()
.column(entity::access_log::Column::FileId)
.from(entity::access_log::Entity)
.cond_where(Condition::all().add(entity::access_log::Column::Successful.eq(1)))
.to_owned(),
),
)
.filter(
entity::file::Column::Id.not_in_subquery(
Query::select()
.column(entity::access_log::Column::FileId)
.from(entity::access_log::Entity)
.group_by_col(entity::access_log::Column::FileId)
.cond_having(
Condition::all().add(
entity::access_log::Column::FileId
.count()
.gte(CONFIGURATION.max_download_tries),
),
)
.to_owned(),
),
)
.select_only()
.column(entity::file::Column::Id)
.into_tuple()
.all(database_connection)
.await
.map_err(Error::DatabaseOperationFailed)
}
/// Removes undownloadable files from the database.
///
/// This function deletes files that are either past their download expiration
/// date, have been successfully downloaded, or have exceeded the maximum
/// number of allowed download attempts.
///
/// # Arguments
///
/// * `database_connection` - A `DatabaseConnection` instance used to interact
/// with the database.
///
/// # Returns
///
/// * [`Ok<()>`] on success
/// * [`Err<Error>`] on error
pub async fn remove_undownloadable_files(database_connection: &DatabaseConnection) -> Result<()> {
entity::File::delete_many()
.filter(
Condition::any()
.add(entity::file::Column::DownloadUntil.lt(Utc::now()))
.add(
entity::file::Column::Id.in_subquery(
Query::select()
.column(entity::access_log::Column::FileId)
.from(entity::access_log::Entity)
.cond_where(
Condition::all().add(entity::access_log::Column::Successful.eq(1)),
)
.to_owned(),
),
)
.add(
entity::file::Column::Id
.in_subquery(
Query::select()
.column(entity::access_log::Column::FileId)
.from(entity::access_log::Entity)
.group_by_col(entity::access_log::Column::FileId)
.cond_having(
Condition::all().add(
entity::access_log::Column::FileId
.count()
.gte(CONFIGURATION.max_download_tries),
),
)
.to_owned(),
)
.to_owned(),
),
)
.exec(database_connection)
.await
.map(|_| ())
.map_err(Error::DatabaseOperationFailed)
}
/// Returns whether given `ip` may currently upload a file
///
/// # Arguments
///
/// * `database_connection` - [`DatabaseConnection`] to use
/// * `ip` - Ip to check
///
/// # Returns
///
/// * [`Ok<true>`] if client may upload a file
/// * [`Ok<false>`] if client must not upload a file at this time
/// * [`Err<Error>`] on error
pub async fn is_upload_limit_reached(
database_connection: &DatabaseConnection,
ip: &str,
) -> Result<bool> {
let min_uploaded_at = Utc::now()
.checked_sub_days(Days::new(1))
.ok_or(Error::DateCalculationFailed)?;
let count = entity::File::find()
.select_only()
.column_as(entity::file::Column::Id.count(), "count")
.filter(entity::file::Column::UploaderIp.eq(ip))
.filter(entity::file::Column::UploadedAt.gte(min_uploaded_at.naive_utc()))
.into_model::<CountResult>()
.one(database_connection)
.await
.map_err(Error::DatabaseOperationFailed)?
.unwrap_or(CountResult { count: 0 })
.count;
Ok(count >= CONFIGURATION.ip_uploads_per_day.into())
}
/// Store new file entry to database
///
/// # Arguments
///
/// * `database_connection` - [`DatabaseConnection`] to use
/// * `id` - Id of new file
/// * `hash` - Encryption key hash
/// * `uploader_ip` - Ip of client uploading this file
/// * `encrypted_metadata` - File metadata in encrypted form
///
/// # Returns
///
/// * [`Ok<()>`] on success
/// * [`Err<Error>`] on error
pub async fn store_file(
database_connection: &DatabaseConnection,
id: &Uuid,
hash: String,
uploader_ip: String,
encrypted_metadata: Vec<u8>,
) -> Result<()> {
let now = Utc::now();
let download_until = now
.checked_add_days(CONFIGURATION.file_lifetime)
.ok_or(Error::DateCalculationFailed)?;
let file = entity::file::ActiveModel {
id: Set((*id).into()),
hash: Set(hash),
uploader_ip: Set(uploader_ip),
uploaded_at: Set(now.naive_utc()),
download_until: Set(download_until.naive_utc()),
encrypted_metadata: Set(encrypted_metadata),
};
entity::File::insert(file)
.exec(database_connection)
.await
.map(|_| ())
.map_err(Error::DatabaseOperationFailed)
}
/// Store new access log entry to database
///
/// # Arguments
///
/// * `database_connection` - [`DatabaseConnection`] to use
/// * `ip` - Ip of the client accessing the file
/// * `file_id` - Id of the file being accessed
/// * `successful` - Whether validation was successful or not
///
/// # Returns
///
/// * [`Ok<()>`] on success
/// * [`Err<Error>`] on error
pub async fn store_access_log(
database_connection: &DatabaseConnection,
ip: &str,
file_id: &Uuid,
successful: bool,
) -> Result<()> {
let log = entity::access_log::ActiveModel {
id: Set(Uuid::new_v4().into()),
ip: Set(ip.into()),
file_id: Set((*file_id).into()),
date_time: Set(Utc::now().naive_utc()),
successful: Set(i8::from(successful)),
};
entity::AccessLog::insert(log)
.exec(database_connection)
.await
.map(|_| ())
.map_err(Error::DatabaseOperationFailed)
}

71
backend/src/encryption/definitions.rs Normal file

@@ -0,0 +1,71 @@
use crate::error::Result;
/// Provides functions to make encrypted data storable.
/// Handles encoding and decoding of encrypted data, including details like the nonce.
/// Encoded data can be stored safely.
pub trait Encoding<T> {
/// Encodes data so that it can be stored.
///
/// # Consumes
///
/// * self
///
/// # Returns
///
/// * Encoded data
fn encode(self) -> Vec<u8>;
/// Decodes previously encoded data so that it can be decrypted later.
///
/// # Arguments
///
/// * `data` - Encoded data to decode into `self`
///
/// # Returns
///
/// * [`Ok<self>`] on success
/// * [`Err<Error>`] on error
fn decode<TI: IntoIterator<Item = u8>>(data: TI) -> Result<T>;
}
/// Provides functions to create encrypted data and decrypt it back.
pub trait Encryption<T> {
/// Encrypts given data.
///
/// Encrypts plain data and returns the encryption data and the newly
/// generated key as a tuple.
///
/// # Arguments
///
/// * `plain` - Plain data to encrypt
///
/// # Returns
///
/// * [`Ok<(T, Vec<u8>)>`] on success, containing (Self, decryption key)
/// * [`Err<Error>`] on error
fn encrypt<TI: IntoIterator<Item = u8>>(plain: TI) -> Result<(T, Vec<u8>)>;
/// Encrypts plain data with a given key and returns the encryption data.
///
/// # Arguments
///
/// * `plain` - Plain data to encrypt
/// * `key` - Predefined key to use
///
/// # Returns
///
/// * [`Ok<Vec<u8>>`] on success, containing encrypted data
/// * [`Err<Error>`] on error
fn encrypt_with_key<TI: IntoIterator<Item = u8>>(plain: TI, key: &[u8]) -> Result<T>;
/// Decrypts data with given key.
///
/// # Arguments
///
/// * `key` - Decryption key for this encrypted data
///
/// # Returns
///
/// * [`Ok<Vec<u8>>`] on success with decrypted data
/// * [`Err<Error>`] on error
fn decrypt(self, key: &[u8]) -> Result<Vec<u8>>;
}
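A hypothetical helper showing how the two traits are meant to compose; apart from the traits and `crate::error::Result`, the names are illustrative:

```rust
use crate::encryption::{Encoding, Encryption};
use crate::error::Result;

// Encrypt with a fresh key, persist the encoded bytes, then restore and decrypt.
fn round_trip<T: Encryption<T> + Encoding<T>>(plain: Vec<u8>) -> Result<Vec<u8>> {
    let (data, key) = T::encrypt(plain)?; // (container, freshly generated key)
    let stored = data.encode();           // bytes that are safe to store
    T::decode(stored)?.decrypt(&key)      // rebuild the container and decrypt
}
```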

10
backend/src/encryption/mod.rs Normal file

@@ -0,0 +1,10 @@
//! Encryption module.
//!
//! This module provides encryption functionalities. It includes submodules
//! for encryption definitions. Currently this contains the XChaCha20Poly1305
//! encryption scheme.
pub(crate) mod definitions;
mod xchacha20poly1305;
pub use definitions::*;
pub use xchacha20poly1305::XChaCha20Poly1305Data as Data;

94
backend/src/encryption/xchacha20poly1305.rs Normal file

@@ -0,0 +1,94 @@
use super::definitions::{Encoding, Encryption};
use crate::error::{Error, Result};
use chacha20poly1305::{
aead::{AeadCore, AeadMutInPlace, KeyInit, OsRng},
Key, XChaCha20Poly1305, XNonce,
};
/// Container for encrypted data and the necessary information to decrypt it.
pub struct XChaCha20Poly1305Data {
// Nonce for decrypting `content`
nonce: Vec<u8>,
// Encrypted data
content: Vec<u8>,
}
impl Encoding<XChaCha20Poly1305Data> for XChaCha20Poly1305Data {
fn encode(mut self) -> Vec<u8> {
let mut data = vec![];
data.append(&mut self.nonce);
data.append(&mut self.content);
data
}
fn decode<TI: IntoIterator<Item = u8>>(data: TI) -> Result<XChaCha20Poly1305Data> {
let mut data = data.into_iter().collect::<Vec<u8>>();
if data.len() < 24 {
return Err(Error::InvalidEncryptionData("Data too short".into()));
}
let content = data.split_off(24);
Ok(Self {
nonce: data,
content,
})
}
}
impl Encryption<XChaCha20Poly1305Data> for XChaCha20Poly1305Data {
fn encrypt<TI: IntoIterator<Item = u8>>(plain: TI) -> Result<(XChaCha20Poly1305Data, Vec<u8>)> {
let key = XChaCha20Poly1305::generate_key(&mut OsRng);
let mut cipher = XChaCha20Poly1305::new(&key);
let nonce = XChaCha20Poly1305::generate_nonce(&mut OsRng);
let mut plain = plain.into_iter().collect::<Vec<u8>>();
cipher
.encrypt_in_place(&nonce, &[], &mut plain)
.map_err(|_| Error::EncryptionFailed)?;
let encryption_data = XChaCha20Poly1305Data {
nonce: nonce.to_vec(),
content: plain,
};
Ok((encryption_data, key.to_vec()))
}
fn encrypt_with_key<TI: IntoIterator<Item = u8>>(
plain: TI,
key: &[u8],
) -> Result<XChaCha20Poly1305Data> {
let key = Key::from_slice(key);
let mut cipher = XChaCha20Poly1305::new(key);
let nonce = XChaCha20Poly1305::generate_nonce(&mut OsRng);
let mut content = plain.into_iter().collect::<Vec<u8>>();
cipher
.encrypt_in_place(&nonce, &[], &mut content)
.map_err(|_| Error::EncryptionFailed)?;
Ok(XChaCha20Poly1305Data {
nonce: nonce.to_vec(),
content,
})
}
fn decrypt(mut self, key: &[u8]) -> Result<Vec<u8>> {
if key.len() != 32 {
return Err(Error::InvalidEncryptionData("Invalid key length".into()));
}
let key = Key::from_slice(key);
let mut cipher = XChaCha20Poly1305::new(key);
let nonce = XNonce::from_slice(&self.nonce);
cipher
.decrypt_in_place(nonce, &[], &mut self.content)
.map_err(|_| Error::DecryptionFailed)?;
Ok(self.content)
}
}
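A concrete (hypothetical) round trip over the `Data` alias from the module above; as `decode`'s `split_off(24)` implies, the encoded form is the 24-byte XNonce followed by the ciphertext:

```rust
use crate::encryption::{Data, Encoding, Encryption};

fn demo() -> crate::error::Result<()> {
    let (encrypted, key) = Data::encrypt(b"secret".to_vec())?;
    let stored = encrypted.encode(); // nonce (24 bytes) || ciphertext
    let plain = Data::decode(stored)?.decrypt(&key)?;
    assert_eq!(plain, b"secret");
    Ok(())
}
```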

48
backend/src/error.rs Normal file

@@ -0,0 +1,48 @@
//! Error module for this whole crate
use sea_orm::DbErr;
use std::{fmt, result};
pub type Result<T> = result::Result<T, Error>;
pub enum Error {
DateCalculationFailed,
DatabaseOperationFailed(DbErr),
IpHeaderMissing(String),
IpHeaderInvalid,
SavingFileFailed(std::io::Error),
LoadingFileFailed(std::io::Error),
DeletingFileFailed(std::io::Error),
ReadingDirectoryFailed(std::io::Error),
EncryptionFailed,
DecryptionFailed,
KeyInvalid,
JsonSerializationFailed(serde_json::Error),
InvalidEncryptionData(String),
HashingFailure(String),
HashVerificationFailure(String),
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::DateCalculationFailed => write!(f, "Date calculation failed"),
Self::DatabaseOperationFailed(inner) => {
write!(f, "Database operation failed: {inner}")
}
Self::IpHeaderMissing(header_name) => write!(f, "Ip header {header_name} missing"),
Self::IpHeaderInvalid => write!(f, "Ip header invalid"),
Self::SavingFileFailed(inner) => write!(f, "Saving file failed: {inner}"),
Self::LoadingFileFailed(inner) => write!(f, "Loading file failed: {inner}"),
Self::DeletingFileFailed(inner) => write!(f, "Removing file failed: {inner}"),
Self::ReadingDirectoryFailed(inner) => write!(f, "Reading directory failed: {inner}"),
Self::EncryptionFailed => write!(f, "Encryption failed"),
Self::DecryptionFailed => write!(f, "Decryption failed"),
Self::KeyInvalid => write!(f, "Key invalid"),
Self::JsonSerializationFailed(inner) => write!(f, "JSON Serialization failed: {inner}"),
Self::InvalidEncryptionData(inner) => write!(f, "Invalid encryption data: {inner}"),
Self::HashingFailure(inner) => write!(f, "Hashing failure: {inner}"),
Self::HashVerificationFailure(inner) => write!(f, "Hash verification failure: {inner}"),
}
}
}

156
backend/src/file.rs Normal file

@@ -0,0 +1,156 @@
//! Module containing functions for saving / reading encrypted data on disk
use super::error::{Error, Result};
use crate::configuration::CONFIGURATION;
use serde::{Deserialize, Serialize};
use std::io;
use std::path::PathBuf;
use std::str::FromStr;
use std::{
fs::{self, OpenOptions},
io::{Read, Write},
};
use uuid::Uuid;
/// File metadata that will be stored serialized and encrypted in the database
#[derive(Serialize, Deserialize)]
pub struct Metadata {
/// Name of the uploaded file
pub file_name: String,
/// MIME type of the uploaded file
pub mime_type: String,
}
/// Stores new file on disk
///
/// # Arguments
///
/// * `id` - File id (to use as file name)
/// * `content` - Content to store
///
/// # Returns
///
/// * [`Ok<PathBuf>`] on success with file path
/// * [`Err<Error>`] on error
pub fn store_data(id: &Uuid, content: Vec<u8>) -> Result<PathBuf> {
let mut file_path = CONFIGURATION.file_path.clone();
file_path.push(id.to_string());
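/* create_new: fail if a file with this id already exists, so an id collision never overwrites stored data */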
let mut file = OpenOptions::new()
.create_new(true)
.write(true)
.open(&file_path)
.map_err(Error::SavingFileFailed)?;
if let Err(error) = file.write_all(&content) {
delete(id)?;
return Err(Error::SavingFileFailed(error));
}
drop(content);
if let Err(error) = file.sync_all() {
delete(id)?;
return Err(Error::SavingFileFailed(error));
}
Ok(file_path)
}
/// Retrieves the Ids of all stored files.
///
/// This function reads the directory and collects the UUIDs of all files stored.
///
/// # Returns
///
/// * [`Ok<Vec<Uuid>>`] - A vector containing the UUIDs of all stored files.
/// * [`Err<Error>`] on error
pub fn get_stored_file_ids() -> Result<Vec<Uuid>> {
let mut file_ids = vec![];
let read_dir =
fs::read_dir(CONFIGURATION.file_path.clone()).map_err(Error::ReadingDirectoryFailed)?;
for dir_entry in read_dir {
let file_name = dir_entry
.map_err(Error::ReadingDirectoryFailed)?
.file_name();
let file_name = file_name
.to_str()
.ok_or(Error::ReadingDirectoryFailed(io::Error::new(
io::ErrorKind::Other,
"Could not get file name",
)))?;
let file_id = Uuid::from_str(file_name).map_err(|_| {
Error::ReadingDirectoryFailed(io::Error::new(
io::ErrorKind::InvalidData,
format!("File name not a Uuid: {file_name}"),
))
})?;
file_ids.push(file_id);
}
Ok(file_ids)
}
/// Load data from disk
///
/// # Arguments
///
/// * `id` - File id
///
/// # Returns
///
/// * [`Ok<Vec<u8>>`] on success, containing file content
/// * [`Err<Error>`] on error
pub fn load_data(id: &Uuid) -> Result<Vec<u8>> {
let mut file_path = CONFIGURATION.file_path.clone();
file_path.push(id.to_string());
let mut content = vec![];
let mut file = OpenOptions::new()
.read(true)
.open(&file_path)
.map_err(Error::LoadingFileFailed)?;
if let Err(error) = file.read_to_end(&mut content) {
return Err(Error::LoadingFileFailed(error));
};
Ok(content)
}
/// Ensure file is deleted
///
/// # Arguments
///
/// * `id` - File id
///
/// # Returns
///
/// * [`Ok<()>`] ensuring file doesn't exist (anymore)
/// * [`Err<Error>`] on error
pub fn delete(id: &Uuid) -> Result<()> {
let mut file_path = CONFIGURATION.file_path.clone();
file_path.push(id.to_string());
if !(fs::exists(&file_path).map_err(Error::DeletingFileFailed)?) {
return Ok(());
}
if !fs::metadata(&file_path)
.map_err(Error::DeletingFileFailed)?
.is_file()
{
return Err(Error::DeletingFileFailed(io::Error::new(
io::ErrorKind::IsADirectory,
"Directory given",
)));
}
fs::remove_file(&file_path).map_err(Error::DeletingFileFailed)
}

28
backend/src/hash/argon2.rs Normal file

@@ -0,0 +1,28 @@
use super::definitions::Hashing;
use crate::error::{Error, Result};
use argon2::password_hash::{
rand_core::OsRng, PasswordHash, PasswordHasher, PasswordVerifier, SaltString,
};
/// A struct representing the Argon2 hashing algorithm.
pub struct Argon2 {}
impl Hashing for Argon2 {
fn hash(data: &[u8]) -> Result<String> {
let salt = SaltString::generate(&mut OsRng);
Ok(argon2::Argon2::default()
.hash_password(data, &salt)
.map_err(|error| Error::HashingFailure(error.to_string()))?
.to_string())
}
fn verify(data: &[u8], hash: &str) -> Result<bool> {
let parsed_hash = PasswordHash::new(hash)
.map_err(|error| Error::HashVerificationFailure(error.to_string()))?;
Ok(argon2::Argon2::default()
.verify_password(data, &parsed_hash)
.is_ok())
}
}
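A hypothetical usage sketch of this `Hashing` implementation (the demo function is illustrative; `Hash` is the alias re-exported by the hash module below):

```rust
use crate::hash::{Hash, Hashing};

fn demo() -> crate::error::Result<()> {
    let hash = Hash::hash(b"key material")?; // salted, PHC-format Argon2 hash
    assert!(Hash::verify(b"key material", &hash)?);
    assert!(!Hash::verify(b"wrong key", &hash)?);
    Ok(())
}
```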

30
backend/src/hash/definitions.rs Normal file

@@ -0,0 +1,30 @@
use crate::error::Result;
/// Provides functions to hash data or to verify hashes
pub trait Hashing {
/// Hashes given `data`
///
/// # Arguments
///
/// * `data` - Data to hash
///
/// # Returns
///
/// * [`Ok<String>`] on success, containing the hash
/// * [`Err<Error>`] on error
fn hash(data: &[u8]) -> Result<String>;
/// Verifies given `data` against `hash`
///
/// # Arguments
///
/// * `data` - Data to verify hash against
/// * `hash` - Hash to verify
///
/// # Returns
///
/// * [`Ok<true>`] on `data` matching `hash`
/// * [`Ok<false>`] on `data` **not** matching `hash`
/// * [`Err<Error>`] on error
fn verify(data: &[u8], hash: &str) -> Result<bool>;
}

9
backend/src/hash/mod.rs Normal file

@@ -0,0 +1,9 @@
//! Hash module.
//!
//! This module provides hashing functionalities. It includes submodules
//! for Argon2 hashing and hash definitions.
mod argon2;
mod definitions;
pub use argon2::Argon2 as Hash;
pub use definitions::*;

105
backend/src/main.rs Normal file

@@ -0,0 +1,105 @@
use configuration::CONFIGURATION;
use migration::{Migrator, MigratorTrait};
use sea_orm::{ConnectOptions, Database, DatabaseConnection};
use std::{process, time::Duration};
use tokio::{signal::ctrl_c, task::JoinSet, sync::broadcast};
mod api;
mod cleanup;
mod configuration;
mod database;
mod encryption;
mod error;
mod file;
mod hash;
mod request;
mod util;
#[tokio::main]
async fn main() {
env_logger::init();
/* Init configuration */
let connection_string = &CONFIGURATION.connection_string;
let database_connection = match setup_database(connection_string).await {
Some(database_connection) => database_connection,
None => {
log::error!("Bye.");
process::exit(1);
}
};
let mut join_set = JoinSet::new();
let (shutdown_tx, _) = broadcast::channel(1);
let api_database_connection = database_connection.clone();
let api_shutdown_rx = shutdown_tx.subscribe();
join_set.spawn(async move {
if let Err(error) = api::listen(api_database_connection, api_shutdown_rx).await {
log::error!("API failed: {error}");
}
});
let cleanup_database_connection = database_connection.clone();
let cleanup_shutdown_rx = shutdown_tx.subscribe();
join_set.spawn(async move {
if let Err(error) = cleanup::run(cleanup_database_connection, cleanup_shutdown_rx).await {
log::error!("Cleanup failed: {:?}", error);
}
});
join_set.spawn(async move {
use log::{error, info};
if let Err(error) = ctrl_c().await {
error!("Could not listen to ctrl+c (SIGINT): {error}");
error!("Exiting process. Bye.");
process::exit(1);
}
info!("Received ctrl+c (SIGINT)");
if let Err(error) = shutdown_tx.send(()) {
log::error!("Could not send shutdown signal: {error}");
}
});
join_set.join_all().await;
log::info!("Closing database connection...");
if let Err(error) = database_connection.close().await {
log::error!("Could not close database connection: {error}");
}
log::info!("Bye.");
}
async fn setup_database(connection_string: &str) -> Option<DatabaseConnection> {
let mut connect_options = ConnectOptions::new(connection_string);
log::info!("Connecting and setting up database (connection timeout is 8 secs)...");
connect_options
.sqlx_logging_level(log::LevelFilter::Debug)
.max_connections(5)
.min_connections(1)
.connect_timeout(Duration::from_secs(8))
.acquire_timeout(Duration::from_secs(8))
.idle_timeout(Duration::from_secs(8))
.max_lifetime(Duration::from_secs(8));
let Ok(database_connection) = Database::connect(connect_options).await else {
log::error!("Could not connect to database. Bye.");
return None;
};
if Migrator::up(&database_connection, None).await.is_err() {
log::error!("Could not migrate database");
return None;
};
Some(database_connection)
}

134
backend/src/request.rs Normal file

@@ -0,0 +1,134 @@
//! Module containing functions that are related to HTTP requests
use super::error::{Error, Result};
use crate::configuration::CONFIGURATION;
use crate::file;
use axum::http::header::{CONTENT_DISPOSITION, CONTENT_TYPE};
use axum::http::HeaderMap;
use regex::Regex;
use std::sync::LazyLock;
use uuid::Uuid;
const FALLBACK_CONTENT_TYPE: &str = "application/octet-stream";
static FILE_NAME_REGEX: LazyLock<Regex> =
LazyLock::new(|| Regex::new("filename=\"(.*?)\"").unwrap());
/// Tries getting request Ip from given `headers`
///
/// The header name defined in [`CONFIGURATION`] will be checked for an (Ip)
/// value and then returned. If the value is missing / empty, an [`Error`] is
/// returned.
///
/// # Arguments
///
/// * `headers` - Headers to check
///
/// # Returns
///
/// * [`Ok<String>`] on success, containing the request Ip
/// * [`Err<Error>`] on error
pub fn get_request_ip(headers: &HeaderMap) -> Result<String> {
Ok(headers
.get(CONFIGURATION.ip_header_name.clone())
.ok_or(Error::IpHeaderMissing(CONFIGURATION.ip_header_name.clone()))?
.to_str()
.map_err(|_| Error::IpHeaderInvalid)?
.to_string())
}
impl From<file::Metadata> for HeaderMap {
fn from(val: file::Metadata) -> Self {
let mut headers = HeaderMap::new();
if let Ok(content_disposition) =
format!("attachment; filename=\"{}\"", val.file_name).parse()
{
headers.append(CONTENT_DISPOSITION, content_disposition);
}
if let Ok(content_type) = val.mime_type.parse() {
headers.append(CONTENT_TYPE, content_type);
}
headers
}
}
impl From<HeaderMap> for file::Metadata {
fn from(value: HeaderMap) -> Self {
let file_name = value
.get(CONTENT_DISPOSITION)
.and_then(|header_value| header_value.to_str().map(String::from).ok())
.and_then(|header_value| {
FILE_NAME_REGEX
.captures(&header_value)
.and_then(|captures| captures.get(1))
.map(|capture| capture.as_str().to_string())
});
let mime_type = value
.get(CONTENT_TYPE)
.and_then(|header_value| header_value.to_str().map(String::from).ok());
Self {
file_name: file_name.unwrap_or(Uuid::new_v4().to_string()),
mime_type: mime_type.unwrap_or(FALLBACK_CONTENT_TYPE.into()),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_from_metadata_to_headers() {
let metadata = file::Metadata {
file_name: "My file.exe".into(),
mime_type: "my/mimetype".into(),
};
let headers: HeaderMap = metadata.into();
assert_eq!(2, headers.len());
assert_eq!(
"attachment; filename=\"My file.exe\"",
headers
.get("Content-Disposition")
.unwrap()
.to_str()
.unwrap()
);
assert_eq!(
"my/mimetype",
headers.get("Content-Type").unwrap().to_str().unwrap()
)
}
#[test]
fn test_from_headers_to_metadata() {
let mut headers = HeaderMap::new();
headers.append(
"Content-Disposition",
"attachment; filename=\"My file.gif\" what=ever"
.parse()
.unwrap(),
);
headers.append("Content-Type", "my/mime+type".parse().unwrap());
let metadata: file::Metadata = headers.into();
assert_eq!("My file.gif", metadata.file_name);
assert_eq!("my/mime+type", metadata.mime_type);
}
#[test]
fn test_with_missing_headers_to_metadata() {
let headers = HeaderMap::new();
let metadata: file::Metadata = headers.into();
assert!(!metadata.file_name.is_empty());
assert_eq!("application/octet-stream", metadata.mime_type);
}
}

58
backend/src/util.rs Normal file

@@ -0,0 +1,58 @@
//! Module with utilities that can't be categorized otherwise
use crate::error::{Error, Result};
use crate::hash::{Hash, Hashing};
use base64::prelude::BASE64_URL_SAFE;
use base64::Engine;
/// Decodes and validates given file encryption key
///
/// Given `encoded_key` is decoded and then checked against given `hash`.
/// If the key is valid, it will be returned, otherwise error.
///
/// # Arguments
///
/// * `encoded_key` - File encryption key to decode and check
/// * `hash` - Hash to check key against
///
/// # Returns
///
/// * [`Ok<Vec<u8>>`] containing decoded and validated key
/// * [`Err<Error>`] on error
pub fn get_validated_key(encoded_key: &str, hash: &str) -> Result<Vec<u8>> {
let key = BASE64_URL_SAFE
.decode(encoded_key)
.map_err(|_| Error::KeyInvalid)?;
match Hash::verify(&key, hash).map_err(|_| Error::KeyInvalid) {
Ok(true) => Ok(key),
_ => Err(Error::KeyInvalid),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn valid_key_returned() {
let result = get_validated_key(
"MQ==", // "1"
"$argon2id$v=19$m=12,t=3,p=1$dzc0OGd1OWZveHMwMDAwMA$c76OJ4RDh1TlW1tdcbimWA",
);
assert!(result.is_ok());
assert_eq!(result.unwrap(), [49]); // ["1"]
}
#[test]
fn invalid_input_handled() {
assert!(get_validated_key("MQ==", "xxxYYY").is_err());
assert!(get_validated_key(
"@@@",
"$argon2id$v=19$m=12,t=3,p=1$dzc0OGd1OWZveHMwMDAwMA$c76OJ4RDh1TlW1tdcbimWA"
)
.is_err());
}
}