Mirror of https://github.com/Dangoware/confetti-box.git (synced 2025-04-19 23:32:58 -05:00)
Improved setup for chunk downloading, added chunk size to config
parent 4ca0f50685
commit b386cd8340

5 changed files with 55 additions and 136 deletions
@@ -369,6 +369,17 @@ impl Chunkbase {
     pub fn mut_chunks(&mut self) -> &mut HashMap<Uuid, ChunkedInfo> {
         &mut self.chunks
     }
+
+    /// Delete all temporary chunk files
+    pub fn delete_all(&mut self) -> Result<(), io::Error> {
+        for chunk in &self.chunks {
+            fs::remove_file(&chunk.1.path)?;
+        }
+
+        self.chunks.clear();
+
+        Ok(())
+    }
 }

 /// Information about how to manage partially uploaded chunks of files
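Iterating a borrowed HashMap yields (key, value) tuples, which is why the path is reached through chunk.1 above. A minimal sketch of the same cleanup written against .values() instead, assuming only the ChunkedInfo fields shown in this diff:

    /// Delete all temporary chunk files (equivalent to the version above)
    pub fn delete_all(&mut self) -> Result<(), io::Error> {
        // `values()` makes it explicit that only the ChunkedInfo is used.
        for info in self.chunks.values() {
            fs::remove_file(&info.path)?;
        }
        self.chunks.clear();
        Ok(())
    }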
src/lib.rs (146 changed lines)
@@ -17,9 +17,8 @@ use chrono::Utc;
 use database::{Chunkbase, ChunkedInfo, Mmid, MochiFile, Mochibase};
 use maud::{html, Markup, PreEscaped};
 use rocket::{
-    data::{ByteUnit, ToByteUnit}, get, post, serde::{json::Json, Serialize}, tokio::{fs, io::{AsyncSeekExt, AsyncWriteExt}}, Data, State
+    data::ToByteUnit, get, post, serde::{json::Json, Serialize}, tokio::{fs, io::{AsyncSeekExt, AsyncWriteExt}}, Data, State
 };
-use utils::hash_file;
 use uuid::Uuid;

 #[get("/")]
@@ -66,117 +65,6 @@ pub fn home(settings: &State<Settings>) -> Markup {
     }
 }

-/*
-#[derive(Debug, FromForm)]
-pub struct Upload<'r> {
-    #[field(name = "fileUpload")]
-    file: TempFile<'r>,
-}
-
-/// A response to the client from the server
-#[derive(Serialize, Default, Debug)]
-pub struct ClientResponse {
-    /// Success or failure
-    pub status: bool,
-
-    pub response: &'static str,
-
-    #[serde(skip_serializing_if = "str::is_empty")]
-    pub name: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub mmid: Option<Mmid>,
-    #[serde(skip_serializing_if = "str::is_empty")]
-    pub hash: String,
-    #[serde(skip_serializing_if = "Option::is_none")]
-    pub expires: Option<DateTime<Utc>>,
-}
-
-impl ClientResponse {
-    fn failure(response: &'static str) -> Self {
-        Self {
-            status: false,
-            response,
-            ..Default::default()
-        }
-    }
-}
-
-/// Handle a file upload and store it
-#[post("/upload?<expire_time>", data = "<file_data>")]
-pub async fn handle_upload(
-    expire_time: String,
-    mut file_data: Form<Upload<'_>>,
-    db: &State<Arc<RwLock<Mochibase>>>,
-    settings: &State<Settings>,
-) -> Result<Json<ClientResponse>, std::io::Error> {
-    let current = Utc::now();
-    // Ensure the expiry time is valid, if not return an error
-    let expire_time = if let Ok(t) = parse_time_string(&expire_time) {
-        if settings.duration.restrict_to_allowed && !settings.duration.allowed.contains(&t) {
-            return Ok(Json(ClientResponse::failure("Duration not allowed")));
-        }
-
-        if t > settings.duration.maximum {
-            return Ok(Json(ClientResponse::failure("Duration larger than max")));
-        }
-
-        t
-    } else {
-        return Ok(Json(ClientResponse::failure("Duration invalid")));
-    };
-
-    let raw_name = file_data
-        .file
-        .raw_name()
-        .unwrap()
-        .dangerous_unsafe_unsanitized_raw()
-        .as_str()
-        .to_string();
-
-    // Get temp path for the file
-    let temp_filename = settings.temp_dir.join(Uuid::new_v4().to_string());
-    file_data.file.persist_to(&temp_filename).await?;
-
-    // Get hash and random identifier and expiry
-    let file_mmid = Mmid::new_random();
-    let file_hash = hash_file(&temp_filename).await?;
-    let expiry = current + expire_time;
-
-    // Process filetype
-    let file_type = file_format::FileFormat::from_file(&temp_filename)?;
-
-    let constructed_file = MochiFile::new(
-        file_mmid.clone(),
-        raw_name,
-        file_type.media_type().to_string(),
-        file_hash,
-        current,
-        expiry,
-    );
-
-    // If the hash does not exist in the database,
-    // move the file to the backend, else, delete it
-    if db.read().unwrap().get_hash(&file_hash).is_none() {
-        std::fs::rename(temp_filename, settings.file_dir.join(file_hash.to_string()))?;
-    } else {
-        std::fs::remove_file(temp_filename)?;
-    }
-
-    db.write()
-        .unwrap()
-        .insert(&file_mmid, constructed_file.clone());
-
-    Ok(Json(ClientResponse {
-        status: true,
-        name: constructed_file.name().clone(),
-        mmid: Some(constructed_file.mmid().clone()),
-        hash: constructed_file.hash().to_string(),
-        expires: Some(constructed_file.expiry()),
-        ..Default::default()
-    }))
-}
-*/
-
 #[derive(Serialize, Default)]
 pub struct ChunkedResponse {
     status: bool,
@@ -188,7 +76,7 @@ pub struct ChunkedResponse {

     /// Valid max chunk size in bytes
     #[serde(skip_serializing_if = "Option::is_none")]
-    chunk_size: Option<ByteUnit>,
+    chunk_size: Option<u64>,
 }

 impl ChunkedResponse {
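Because uuid and chunk_size are tagged skip_serializing_if = "Option::is_none", a failure response carries only the status and message fields on the wire. A hypothetical in-crate round-trip check, assuming serde_json as a dev-dependency; the field order and message's exact serde attributes are not shown in this diff and are assumed here:

    // `failure` is the constructor used by chunked_upload_start below.
    let resp = ChunkedResponse::failure("Duration invalid");
    assert_eq!(
        serde_json::to_string(&resp).unwrap(),
        r#"{"status":false,"message":"Duration invalid"}"#
    );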
@@ -225,6 +113,8 @@ pub async fn chunked_upload_start(
         return Ok(Json(ChunkedResponse::failure("Duration too large")));
     }

+    fs::File::create_new(&file_info.path).await?;
+
     db.write()
         .unwrap()
         .mut_chunks()
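fs::File::create_new opens the file for writing and fails with ErrorKind::AlreadyExists if the path is already present, so creating the chunk file once at upload start (rather than lazily in the continue handler, as before) also guards against an existing path being silently reused.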
@@ -234,35 +124,32 @@ pub async fn chunked_upload_start(
         status: true,
         message: "".into(),
         uuid: Some(uuid),
-        chunk_size: Some(100.megabytes()),
+        chunk_size: Some(settings.chunk_size),
     }))
 }

 #[post("/upload/chunked/<uuid>?<offset>", data = "<data>")]
 pub async fn chunked_upload_continue(
     chunk_db: &State<Arc<RwLock<Chunkbase>>>,
+    settings: &State<Settings>,
     data: Data<'_>,
     uuid: &str,
     offset: u64,
 ) -> Result<(), io::Error> {
     let uuid = Uuid::parse_str(&uuid).map_err(|e| io::Error::other(e))?;
-    let data_stream = data.open(101.megabytes());
+    let data_stream = data.open((settings.chunk_size + 100).bytes());

     let chunked_info = match chunk_db.read().unwrap().chunks().get(&uuid) {
         Some(s) => s.clone(),
         None => return Err(io::Error::other("Invalid UUID")),
     };

-    let mut file = if !chunked_info.path.try_exists().is_ok_and(|e| e) {
-        fs::File::create_new(&chunked_info.path).await?
-    } else {
-        fs::File::options()
+    let mut file = fs::File::options()
         .read(true)
         .write(true)
         .truncate(false)
         .open(&chunked_info.path)
-        .await?
-    };
+        .await?;

     if offset > chunked_info.size {
         return Err(io::Error::new(ErrorKind::InvalidInput, "The seek position is larger than the file size"))
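The continue endpoint writes each chunk at the byte offset the client supplies; the 100 bytes of slack on data.open presumably lets an oversized chunk be detected downstream rather than silently truncated. A hypothetical client-side helper (the URL shape comes from the route above; the names and the HTTP call itself are assumptions, and chunk_size must be nonzero):

    fn chunk_requests<'a>(data: &'a [u8], chunk_size: usize, uuid: &str) -> Vec<(String, &'a [u8])> {
        // Pair each fixed-size slice with the URL that uploads it at its offset;
        // POST each pair with any HTTP client.
        data.chunks(chunk_size)
            .enumerate()
            .map(|(i, chunk)| {
                let offset = i * chunk_size;
                (format!("/upload/chunked/{uuid}?offset={offset}"), chunk)
            })
            .collect()
    }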
@@ -310,18 +197,23 @@ pub async fn chunked_upload_finish(
         return Err(io::Error::other("File does not exist"))
     }

-    let hash = hash_file(&chunked_info.path).await?;
-    let mmid = Mmid::new_random();
-    let file_type = file_format::FileFormat::from_file(&chunked_info.path)?;
+    // Get file hash
+    let mut hasher = blake3::Hasher::new();
+    hasher.update_mmap_rayon(&chunked_info.path).unwrap();
+    let hash = hasher.finalize();
+    let new_filename = settings.file_dir.join(hash.to_string());

     // If the hash does not exist in the database,
     // move the file to the backend, else, delete it
     if main_db.read().unwrap().get_hash(&hash).is_none() {
-        std::fs::rename(chunked_info.path, settings.file_dir.join(hash.to_string()))?;
+        std::fs::rename(&chunked_info.path, &new_filename).unwrap();
     } else {
-        std::fs::remove_file(chunked_info.path)?;
+        std::fs::remove_file(&chunked_info.path).unwrap();
     }

+    let mmid = Mmid::new_random();
+    let file_type = file_format::FileFormat::from_file(&new_filename).unwrap();
+
     let constructed_file = MochiFile::new(
         mmid.clone(),
         chunked_info.name,
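chunked_upload_finish now hashes the assembled file with blake3's memory-mapped, multi-threaded path instead of the removed hash_file helper. A minimal standalone sketch, assuming the blake3 crate with its "mmap" and "rayon" features enabled in Cargo.toml:

    use std::path::Path;

    fn hash_path(path: &Path) -> std::io::Result<blake3::Hash> {
        let mut hasher = blake3::Hasher::new();
        // Memory-map the file and hash it on the rayon thread pool
        // instead of streaming it through an intermediate buffer.
        hasher.update_mmap_rayon(path)?;
        Ok(hasher.finalize())
    }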
@@ -43,6 +43,7 @@ async fn main() {
         Chunkbase::default(),
     ));
     let local_db = database.clone();
+    let local_chunk = chunkbase.clone();

     // Start monitoring thread, cleaning the database every 2 minutes
     let (shutdown, rx) = tokio::sync::mpsc::channel(1);
@@ -102,4 +103,12 @@ async fn main() {
         .save()
         .expect("Failed to save database");
     info!("Saving database completed successfully.");
+
+    info!("Deleting chunk data on shutdown...");
+    local_chunk
+        .write()
+        .unwrap()
+        .delete_all()
+        .expect("Failed to delete chunks");
+    info!("Deleting chunk data completed successfully.");
 }
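Since the chunk database is rebuilt from Chunkbase::default() at startup, partially uploaded chunk files left on disk could never be resumed after a restart; deleting them on shutdown keeps the temp directory from accumulating orphans.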
@@ -11,6 +11,8 @@ pub fn head(page_title: &str) -> Markup {
         title { (page_title) }
         link rel="icon" type="image/svg+xml" href="/resources/favicon.svg";
         link rel="stylesheet" href="/resources/main.css";
+        link rel="preload" href="/resources/fonts/Roboto.woff2" as="font" type="font/woff2" crossorigin;
+        link rel="preload" href="/resources/fonts/FiraCode.woff2" as="font" type="font/woff2" crossorigin;
     }
 }
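Note that font preloads need the crossorigin attribute even for same-origin files: browsers fetch fonts in anonymous CORS mode, and a preload whose request mode does not match is discarded and the font fetched a second time.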
@@ -17,6 +17,10 @@ pub struct Settings {
     #[serde(default)]
     pub max_filesize: u64,

+    /// Maximum chunk size in bytes
+    #[serde(default)]
+    pub chunk_size: u64,
+
     /// Is overwriting already uploaded files with the same hash allowed, or is
     /// this a no-op?
     #[serde(default)]
@@ -48,7 +52,8 @@ pub struct Settings {
 impl Default for Settings {
     fn default() -> Self {
         Self {
-            max_filesize: 1.megabytes().into(), // 1 MB
+            max_filesize: 25.megabytes().into(), // 25 MB
+            chunk_size: 1.megabytes().into(),
             overwrite: true,
             duration: DurationSettings::default(),
             server: ServerSettings::default(),
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn save(&self) -> Result<(), io::Error> {
|
pub fn save(&self) -> Result<(), io::Error> {
|
||||||
let mut out_path = self.path.clone();
|
let out_path = &self.path.with_extension("new");
|
||||||
out_path.set_extension(".bkp");
|
let mut file = File::create(&out_path)?;
|
||||||
let mut file = File::create(&out_path).expect("Could not save!");
|
|
||||||
file.write_all(&toml::to_string_pretty(self).unwrap().into_bytes())?;
|
file.write_all(&toml::to_string_pretty(self).unwrap().into_bytes())?;
|
||||||
|
|
||||||
fs::rename(out_path, &self.path).unwrap();
|
// Overwrite the original DB with
|
||||||
|
fs::rename(&out_path, &self.path).unwrap();
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
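The reworked save follows the standard write-then-rename pattern: the new contents go to a sibling .new file first, and the final fs::rename replaces the original in one step (atomic on POSIX filesystems when source and target share a volume), so a crash mid-write can no longer leave a half-written settings file behind.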