Mirror of https://github.com/Dangoware/confetti-box.git (synced 2025-04-19 07:12:58 -05:00)

Commit b8d114dd3d: Ran cargo fmt
Parent: c63407d466

3 changed files with 55 additions and 44 deletions
src/database.rs (19 changed lines)

@@ -13,13 +13,13 @@ use ciborium::{from_reader, into_writer};
 use log::{error, info, warn};
 use rand::distributions::{Alphanumeric, DistString};
 use rocket::{
-    form::{self, FromFormField, ValueField}, serde::{Deserialize, Serialize}
+    form::{self, FromFormField, ValueField},
+    serde::{Deserialize, Serialize},
 };
 use serde_with::{serde_as, DisplayFromStr};
 use uuid::Uuid;
 
-#[derive(Debug, Clone)]
-#[derive(Deserialize, Serialize)]
+#[derive(Debug, Clone, Deserialize, Serialize)]
 pub struct Mochibase {
     path: PathBuf,
 
@@ -153,8 +153,7 @@ impl Mochibase {
 
 /// An entry in the database storing metadata about a file
 #[serde_as]
-#[derive(Debug, Clone)]
-#[derive(Deserialize, Serialize)]
+#[derive(Debug, Clone, Deserialize, Serialize)]
 pub struct MochiFile {
     /// A unique identifier describing this file
     mmid: Mmid,
@@ -265,8 +264,7 @@ pub fn clean_database(db: &Arc<RwLock<Mochibase>>, file_path: &Path) {
 
 /// A unique identifier for an entry in the database, 8 characters long,
 /// consists of ASCII alphanumeric characters (`a-z`, `A-Z`, and `0-9`).
-#[derive(Debug, PartialEq, Eq, Clone, Hash)]
-#[derive(Deserialize, Serialize)]
+#[derive(Debug, PartialEq, Eq, Clone, Hash, Deserialize, Serialize)]
 pub struct Mmid(String);
 
 impl Mmid {
@@ -332,9 +330,7 @@ impl std::fmt::Display for Mmid {
 #[rocket::async_trait]
 impl<'r> FromFormField<'r> for Mmid {
     fn from_value(field: ValueField<'r>) -> form::Result<'r, Self> {
-        Ok(
-            Self::try_from(field.value).map_err(|_| form::Error::validation("Invalid MMID"))?
-        )
+        Ok(Self::try_from(field.value).map_err(|_| form::Error::validation("Invalid MMID"))?)
     }
 }
 
@@ -381,8 +377,7 @@ impl Chunkbase {
 
 /// Information about how to manage partially uploaded chunks of files
 #[serde_as]
-#[derive(Default, Debug, Clone)]
-#[derive(Deserialize, Serialize)]
+#[derive(Default, Debug, Clone, Deserialize, Serialize)]
 pub struct ChunkedInfo {
     pub name: String,
     pub size: u64,
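
The derive changes above are rustfmt's merge_derives behavior (on by default): consecutive #[derive(...)] attributes on one item are folded into a single attribute, with no semantic change. A minimal sketch showing the two forms are equivalent, assuming serde (with the derive feature) and serde_json are available; the Before/After types are made up for illustration:

    use serde::{Deserialize, Serialize};

    // Stacked derives, as the old code had them...
    #[derive(Debug, Clone)]
    #[derive(Deserialize, Serialize)]
    pub struct Before(String);

    // ...expand to exactly the same impls as the merged form.
    #[derive(Debug, Clone, Deserialize, Serialize)]
    pub struct After(String);

    fn main() {
        let b = Before("mmid".into());
        let a = After("mmid".into());
        // Both serialize identically as newtype wrappers.
        assert_eq!(
            serde_json::to_string(&b).unwrap(),
            serde_json::to_string(&a).unwrap()
        );
    }
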
src/lib.rs (57 changed lines)
@@ -6,7 +6,10 @@ pub mod settings;
 pub mod strings;
 pub mod utils;
 
-use std::{io::{self, ErrorKind}, sync::{Arc, RwLock}};
+use std::{
+    io::{self, ErrorKind},
+    sync::{Arc, RwLock},
+};
 
 use crate::{
     pages::{footer, head},
@@ -17,7 +20,14 @@ use chrono::{TimeDelta, Utc};
 use database::{Chunkbase, ChunkedInfo, Mmid, MochiFile, Mochibase};
 use maud::{html, Markup, PreEscaped};
 use rocket::{
-    data::ToByteUnit, get, post, serde::{json::Json, Serialize}, tokio::{fs, io::{AsyncSeekExt, AsyncWriteExt}}, Data, State
+    data::ToByteUnit,
+    get, post,
+    serde::{json::Json, Serialize},
+    tokio::{
+        fs,
+        io::{AsyncSeekExt, AsyncWriteExt},
+    },
+    Data, State,
 };
 use uuid::Uuid;
 
@@ -98,15 +108,18 @@ pub async fn chunked_upload_start(
     mut file_info: Json<ChunkedInfo>,
 ) -> Result<Json<ChunkedResponse>, std::io::Error> {
     let uuid = Uuid::new_v4();
-    file_info.path = settings
-        .temp_dir
-        .join(uuid.to_string());
+    file_info.path = settings.temp_dir.join(uuid.to_string());
 
     // Perform some sanity checks
     if file_info.size > settings.max_filesize {
         return Ok(Json(ChunkedResponse::failure("File too large")));
     }
-    if settings.duration.restrict_to_allowed && !settings.duration.allowed.contains(&file_info.expire_duration) {
+    if settings.duration.restrict_to_allowed
+        && !settings
+            .duration
+            .allowed
+            .contains(&file_info.expire_duration)
+    {
         return Ok(Json(ChunkedResponse::failure("Duration not allowed")));
     }
     if file_info.expire_duration > settings.duration.maximum {
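
Pulled out of the handler, the admission logic in this hunk reduces to three comparisons. A standalone sketch under simplified assumptions: the DurationSettings shape is a stand-in for the crate's real settings type, and the third rejection message is hypothetical since the hunk cuts off before that check's body:

    use chrono::TimeDelta;

    struct DurationSettings {
        restrict_to_allowed: bool,
        allowed: Vec<TimeDelta>,
        maximum: TimeDelta,
    }

    fn admission_error(
        size: u64,
        expire: TimeDelta,
        max_filesize: u64,
        d: &DurationSettings,
    ) -> Option<&'static str> {
        if size > max_filesize {
            return Some("File too large");
        }
        if d.restrict_to_allowed && !d.allowed.contains(&expire) {
            return Some("Duration not allowed");
        }
        if expire > d.maximum {
            return Some("Duration too long"); // hypothetical message; not shown in the diff
        }
        None
    }

    fn main() {
        let d = DurationSettings {
            restrict_to_allowed: true,
            allowed: vec![TimeDelta::hours(1), TimeDelta::days(1)],
            maximum: TimeDelta::days(7),
        };
        assert_eq!(admission_error(10, TimeDelta::hours(1), 100, &d), None);
        assert_eq!(
            admission_error(1000, TimeDelta::hours(1), 100, &d),
            Some("File too large")
        );
    }
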
@@ -115,10 +128,10 @@ pub async fn chunked_upload_start(
 
     fs::File::create_new(&file_info.path).await?;
 
-    db.write()
-        .unwrap()
-        .mut_chunks()
-        .insert(uuid, (Utc::now() + TimeDelta::seconds(30), file_info.into_inner()));
+    db.write().unwrap().mut_chunks().insert(
+        uuid,
+        (Utc::now() + TimeDelta::seconds(30), file_info.into_inner()),
+    );
 
     Ok(Json(ChunkedResponse {
         status: true,
@@ -152,7 +165,10 @@ pub async fn chunked_upload_continue(
         .await?;
 
     if offset > chunked_info.1.size {
-        return Err(io::Error::new(ErrorKind::InvalidInput, "The seek position is larger than the file size"))
+        return Err(io::Error::new(
+            ErrorKind::InvalidInput,
+            "The seek position is larger than the file size",
+        ));
     }
 
     file.seek(io::SeekFrom::Start(offset)).await?;
@@ -161,11 +177,8 @@ pub async fn chunked_upload_continue(
     let position = file.stream_position().await?;
 
     if position > chunked_info.1.size {
-        chunk_db.write()
-            .unwrap()
-            .mut_chunks()
-            .remove(&uuid);
-        return Err(io::Error::other("File larger than expected"))
+        chunk_db.write().unwrap().mut_chunks().remove(&uuid);
+        return Err(io::Error::other("File larger than expected"));
     }
 
     Ok(())
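
The two bounds checks reformatted in the hunks above (seek offset against the declared size, then final position against the declared size) are the core of the chunk writer. A synchronous, std-only sketch of that logic over an in-memory buffer; write_chunk and its signature are illustrative, not the crate's API, and unlike the real handler it doesn't evict the chunk record on overflow:

    use std::io::{self, Cursor, ErrorKind, Seek, SeekFrom, Write};

    fn write_chunk(
        buf: &mut (impl Seek + Write),
        offset: u64,
        chunk: &[u8],
        declared_size: u64,
    ) -> io::Result<()> {
        // Reject writes that would start past the size the client declared.
        if offset > declared_size {
            return Err(io::Error::new(
                ErrorKind::InvalidInput,
                "The seek position is larger than the file size",
            ));
        }
        buf.seek(SeekFrom::Start(offset))?;
        buf.write_all(chunk)?;
        // Reject uploads that ended up larger than declared.
        if buf.stream_position()? > declared_size {
            return Err(io::Error::other("File larger than expected"));
        }
        Ok(())
    }

    fn main() -> io::Result<()> {
        let mut buf = Cursor::new(vec![0u8; 8]);
        write_chunk(&mut buf, 0, b"chunk", 8)?; // fits within the declared size
        assert!(write_chunk(&mut buf, 9, b"x", 8).is_err()); // seek past declared size
        assert!(write_chunk(&mut buf, 6, b"xyz", 8).is_err()); // grows past declared size
        Ok(())
    }
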
@@ -187,14 +200,15 @@ pub async fn chunked_upload_finish(
     };
 
     // Remove the finished chunk from the db
-    chunk_db.write()
+    chunk_db
+        .write()
         .unwrap()
         .mut_chunks()
         .remove(&uuid)
         .unwrap();
 
     if !chunked_info.1.path.try_exists().is_ok_and(|e| e) {
-        return Err(io::Error::other("File does not exist"))
+        return Err(io::Error::other("File does not exist"));
     }
 
     // Get file hash
@@ -220,10 +234,13 @@ pub async fn chunked_upload_finish(
         file_type.media_type().to_string(),
         hash,
         now,
-        now + chunked_info.1.expire_duration
+        now + chunked_info.1.expire_duration,
     );
 
-    main_db.write().unwrap().insert(&mmid, constructed_file.clone());
+    main_db
+        .write()
+        .unwrap()
+        .insert(&mmid, constructed_file.clone());
 
     Ok(Json(constructed_file))
 }
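
The reflowed call chains in this file (chunk_db.write().unwrap().mut_chunks()..., main_db.write().unwrap().insert(...)) all follow one shared-state pattern: each handler holds a cloned Arc<RwLock<T>> and takes a short-lived write lock per mutation. A minimal self-contained sketch of that pattern, with a plain HashMap standing in for the database types:

    use std::collections::HashMap;
    use std::sync::{Arc, RwLock};

    fn main() {
        let db: Arc<RwLock<HashMap<String, String>>> = Arc::new(RwLock::new(HashMap::new()));
        let handle = db.clone(); // what each Rocket handler would hold
        handle
            .write()
            .unwrap() // a poisoned lock panics here, as in the diff
            .insert("mmid".into(), "file.bin".into());
        assert_eq!(db.read().unwrap().len(), 1);
    }

Taking the lock for a single statement and dropping it immediately, as the formatted chains do, keeps the critical section short so other handlers aren't blocked across awaits.
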
src/main.rs (23 changed lines)
@@ -1,5 +1,7 @@
 use std::{
-    fs, path::PathBuf, sync::{Arc, RwLock}
+    fs,
+    path::PathBuf,
+    sync::{Arc, RwLock},
 };
 
 use chrono::TimeDelta;
@@ -9,7 +11,11 @@ use confetti_box::{
     settings::Settings,
 };
 use log::info;
-use rocket::{data::ToByteUnit as _, routes, tokio::{self, select, sync::broadcast::Receiver, time}};
+use rocket::{
+    data::ToByteUnit as _,
+    routes,
+    tokio::{self, select, sync::broadcast::Receiver, time},
+};
 
 #[rocket::main]
 async fn main() {
@@ -38,9 +44,7 @@ async fn main() {
     let database = Arc::new(RwLock::new(
         Mochibase::open_or_new(&config.database_path).expect("Failed to open or create database"),
     ));
-    let chunkbase = Arc::new(RwLock::new(
-        Chunkbase::default(),
-    ));
+    let chunkbase = Arc::new(RwLock::new(Chunkbase::default()));
     let local_db = database.clone();
     let local_chunk = chunkbase.clone();
 
@@ -95,9 +99,7 @@ async fn main() {
     rocket.expect("Server failed to shutdown gracefully");
 
     info!("Stopping database cleaning thread...");
-    shutdown
-        .send(())
-        .expect("Failed to stop cleaner thread.");
+    shutdown.send(()).expect("Failed to stop cleaner thread.");
     info!("Stopping database cleaning thread completed successfully.");
 
     info!("Saving database on shutdown...");
@@ -132,10 +134,7 @@ pub async fn clean_loop(
     }
 }
 
-pub async fn clean_chunks(
-    chunk_db: Arc<RwLock<Chunkbase>>,
-    mut shutdown_signal: Receiver<()>,
-) {
+pub async fn clean_chunks(chunk_db: Arc<RwLock<Chunkbase>>, mut shutdown_signal: Receiver<()>) {
     let mut interval = time::interval(TimeDelta::seconds(30).to_std().unwrap());
 
     loop {
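
The final hunk stops at loop {, so the cleaner body itself isn't in the diff. Given the imports it touches (select, broadcast::Receiver, time), a plausible shape is a 30-second tick racing a shutdown channel. The sketch below is a guess at that shape, with a minimal stand-in Chunkbase rather than the crate's real type:

    use std::{
        collections::HashMap,
        sync::{Arc, RwLock},
    };

    use chrono::{DateTime, TimeDelta, Utc};
    use rocket::tokio::{select, sync::broadcast::Receiver, time};
    use uuid::Uuid;

    // Stand-in for the real Chunkbase: maps an upload's UUID to its deadline.
    #[derive(Default)]
    pub struct Chunkbase {
        chunks: HashMap<Uuid, DateTime<Utc>>,
    }

    impl Chunkbase {
        fn mut_chunks(&mut self) -> &mut HashMap<Uuid, DateTime<Utc>> {
            &mut self.chunks
        }
    }

    pub async fn clean_chunks(chunk_db: Arc<RwLock<Chunkbase>>, mut shutdown_signal: Receiver<()>) {
        let mut interval = time::interval(TimeDelta::seconds(30).to_std().unwrap());

        loop {
            select! {
                // Every 30 seconds, drop chunk entries whose deadline has passed.
                _ = interval.tick() => {
                    let now = Utc::now();
                    chunk_db.write().unwrap().mut_chunks().retain(|_, deadline| *deadline > now);
                }
                // Stop cleanly when main() broadcasts shutdown.
                _ = shutdown_signal.recv() => break,
            }
        }
    }

main() would presumably spawn this with the cloned local_chunk handle and a subscription to the same broadcast channel that shutdown.send(()) signals, though the spawn site isn't shown in this diff.
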