From b8d114dd3d09c3ff33dd4e7e5148469f2cf3b5bc Mon Sep 17 00:00:00 2001
From: G2-Games
Date: Sun, 3 Nov 2024 05:46:33 -0600
Subject: [PATCH] Ran `cargo fmt`

---
 src/database.rs | 19 ++++++-----------
 src/lib.rs      | 57 ++++++++++++++++++++++++++++++++-----------------
 src/main.rs     | 23 ++++++++++----------
 3 files changed, 55 insertions(+), 44 deletions(-)

diff --git a/src/database.rs b/src/database.rs
index 0cc168d..34f846f 100644
--- a/src/database.rs
+++ b/src/database.rs
@@ -13,13 +13,13 @@
 use ciborium::{from_reader, into_writer};
 use log::{error, info, warn};
 use rand::distributions::{Alphanumeric, DistString};
 use rocket::{
-    form::{self, FromFormField, ValueField}, serde::{Deserialize, Serialize}
+    form::{self, FromFormField, ValueField},
+    serde::{Deserialize, Serialize},
 };
 use serde_with::{serde_as, DisplayFromStr};
 use uuid::Uuid;
 
-#[derive(Debug, Clone)]
-#[derive(Deserialize, Serialize)]
+#[derive(Debug, Clone, Deserialize, Serialize)]
 pub struct Mochibase {
     path: PathBuf,
@@ -153,8 +153,7 @@ impl Mochibase {
 
 /// An entry in the database storing metadata about a file
 #[serde_as]
-#[derive(Debug, Clone)]
-#[derive(Deserialize, Serialize)]
+#[derive(Debug, Clone, Deserialize, Serialize)]
 pub struct MochiFile {
     /// A unique identifier describing this file
     mmid: Mmid,
@@ -265,8 +264,7 @@ pub fn clean_database(db: &Arc<RwLock<Mochibase>>, file_path: &Path) {
 
 /// A unique identifier for an entry in the database, 8 characters long,
 /// consists of ASCII alphanumeric characters (`a-z`, `A-Z`, and `0-9`).
-#[derive(Debug, PartialEq, Eq, Clone, Hash)]
-#[derive(Deserialize, Serialize)]
+#[derive(Debug, PartialEq, Eq, Clone, Hash, Deserialize, Serialize)]
 pub struct Mmid(String);
 
 impl Mmid {
@@ -332,9 +330,7 @@ impl std::fmt::Display for Mmid {
 
 #[rocket::async_trait]
 impl<'r> FromFormField<'r> for Mmid {
     fn from_value(field: ValueField<'r>) -> form::Result<'r, Self> {
-        Ok(
-            Self::try_from(field.value).map_err(|_| form::Error::validation("Invalid MMID"))?
-        )
+        Ok(Self::try_from(field.value).map_err(|_| form::Error::validation("Invalid MMID"))?)
     }
 }
@@ -381,8 +377,7 @@ impl Chunkbase {
 
 /// Information about how to manage partially uploaded chunks of files
 #[serde_as]
-#[derive(Default, Debug, Clone)]
-#[derive(Deserialize, Serialize)]
+#[derive(Default, Debug, Clone, Deserialize, Serialize)]
 pub struct ChunkedInfo {
     pub name: String,
     pub size: u64,
diff --git a/src/lib.rs b/src/lib.rs
index 19247f6..36cea50 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -6,7 +6,10 @@ pub mod settings;
 pub mod strings;
 pub mod utils;
 
-use std::{io::{self, ErrorKind}, sync::{Arc, RwLock}};
+use std::{
+    io::{self, ErrorKind},
+    sync::{Arc, RwLock},
+};
 
 use crate::{
     pages::{footer, head},
@@ -17,7 +20,14 @@
 use chrono::{TimeDelta, Utc};
 use database::{Chunkbase, ChunkedInfo, Mmid, MochiFile, Mochibase};
 use maud::{html, Markup, PreEscaped};
 use rocket::{
-    data::ToByteUnit, get, post, serde::{json::Json, Serialize}, tokio::{fs, io::{AsyncSeekExt, AsyncWriteExt}}, Data, State
+    data::ToByteUnit,
+    get, post,
+    serde::{json::Json, Serialize},
+    tokio::{
+        fs,
+        io::{AsyncSeekExt, AsyncWriteExt},
+    },
+    Data, State,
 };
 use uuid::Uuid;
@@ -98,15 +108,18 @@ pub async fn chunked_upload_start(
     mut file_info: Json<ChunkedInfo>,
 ) -> Result<Json<ChunkedResponse>, std::io::Error> {
     let uuid = Uuid::new_v4();
-    file_info.path = settings
-        .temp_dir
-        .join(uuid.to_string());
+    file_info.path = settings.temp_dir.join(uuid.to_string());
 
     // Perform some sanity checks
     if file_info.size > settings.max_filesize {
         return Ok(Json(ChunkedResponse::failure("File too large")));
     }
-    if settings.duration.restrict_to_allowed && !settings.duration.allowed.contains(&file_info.expire_duration) {
+    if settings.duration.restrict_to_allowed
+        && !settings
+            .duration
+            .allowed
+            .contains(&file_info.expire_duration)
+    {
         return Ok(Json(ChunkedResponse::failure("Duration not allowed")));
     }
     if file_info.expire_duration > settings.duration.maximum {
@@ -115,10 +128,10 @@
 
     fs::File::create_new(&file_info.path).await?;
 
-    db.write()
-        .unwrap()
-        .mut_chunks()
-        .insert(uuid, (Utc::now() + TimeDelta::seconds(30), file_info.into_inner()));
+    db.write().unwrap().mut_chunks().insert(
+        uuid,
+        (Utc::now() + TimeDelta::seconds(30), file_info.into_inner()),
+    );
 
     Ok(Json(ChunkedResponse {
         status: true,
@@ -152,7 +165,10 @@ pub async fn chunked_upload_continue(
         .await?;
 
     if offset > chunked_info.1.size {
-        return Err(io::Error::new(ErrorKind::InvalidInput, "The seek position is larger than the file size"))
+        return Err(io::Error::new(
+            ErrorKind::InvalidInput,
+            "The seek position is larger than the file size",
+        ));
     }
 
     file.seek(io::SeekFrom::Start(offset)).await?;
@@ -161,11 +177,8 @@
     let position = file.stream_position().await?;
     if position > chunked_info.1.size {
-        chunk_db.write()
-            .unwrap()
-            .mut_chunks()
-            .remove(&uuid);
-        return Err(io::Error::other("File larger than expected"))
+        chunk_db.write().unwrap().mut_chunks().remove(&uuid);
+        return Err(io::Error::other("File larger than expected"));
     }
 
     Ok(())
 }
@@ -187,14 +200,15 @@ pub async fn chunked_upload_finish(
     };
 
     // Remove the finished chunk from the db
-    chunk_db.write()
+    chunk_db
+        .write()
         .unwrap()
         .mut_chunks()
         .remove(&uuid)
         .unwrap();
 
     if !chunked_info.1.path.try_exists().is_ok_and(|e| e) {
-        return Err(io::Error::other("File does not exist"))
+        return Err(io::Error::other("File does not exist"));
     }
 
     // Get file hash
@@ -220,10 +234,13 @@
         file_type.media_type().to_string(),
         hash,
         now,
-        now + chunked_info.1.expire_duration
+        now + chunked_info.1.expire_duration,
     );
 
-    main_db.write().unwrap().insert(&mmid, constructed_file.clone());
+    main_db
+        .write()
+        .unwrap()
+        .insert(&mmid, constructed_file.clone());
 
     Ok(Json(constructed_file))
 }
diff --git a/src/main.rs b/src/main.rs
index fb4151e..f99bf5d 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,5 +1,7 @@
 use std::{
-    fs, path::PathBuf, sync::{Arc, RwLock}
+    fs,
+    path::PathBuf,
+    sync::{Arc, RwLock},
 };
 
 use chrono::TimeDelta;
@@ -9,7 +11,11 @@ use confetti_box::{
     settings::Settings,
 };
 use log::info;
-use rocket::{data::ToByteUnit as _, routes, tokio::{self, select, sync::broadcast::Receiver, time}};
+use rocket::{
+    data::ToByteUnit as _,
+    routes,
+    tokio::{self, select, sync::broadcast::Receiver, time},
+};
 
 #[rocket::main]
 async fn main() {
@@ -38,9 +44,7 @@ async fn main() {
     let database = Arc::new(RwLock::new(
         Mochibase::open_or_new(&config.database_path).expect("Failed to open or create database"),
    ));
-    let chunkbase = Arc::new(RwLock::new(
-        Chunkbase::default(),
-    ));
+    let chunkbase = Arc::new(RwLock::new(Chunkbase::default()));
 
     let local_db = database.clone();
     let local_chunk = chunkbase.clone();
@@ -95,9 +99,7 @@ async fn main() {
     rocket.expect("Server failed to shutdown gracefully");
 
     info!("Stopping database cleaning thread...");
-    shutdown
-        .send(())
-        .expect("Failed to stop cleaner thread.");
+    shutdown.send(()).expect("Failed to stop cleaner thread.");
     info!("Stopping database cleaning thread completed successfully.");
 
     info!("Saving database on shutdown...");
@@ -132,10 +134,7 @@ pub async fn clean_loop(
     }
 }
 
-pub async fn clean_chunks(
-    chunk_db: Arc<RwLock<Chunkbase>>,
-    mut shutdown_signal: Receiver<()>,
-) {
+pub async fn clean_chunks(chunk_db: Arc<RwLock<Chunkbase>>, mut shutdown_signal: Receiver<()>) {
     let mut interval = time::interval(TimeDelta::seconds(30).to_std().unwrap());
 
     loop {