mirror of https://github.com/Dangoware/confetti-box.git
Changed chunked upload URL to take chunk number instead of offset

parent e78c61b410
commit 60dd2ea421

5 changed files with 33 additions and 19 deletions
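In practice, the commit replaces the client-supplied byte offset with a chunk index, and the server derives the seek position itself by multiplying the index by its configured chunk size. A minimal sketch of that arithmetic (the function name and values are illustrative, not part of the commit):

fn chunk_offset(chunk: u64, chunk_size: u64) -> u64 {
    // The server computes the seek position from the chunk number, so a
    // client can no longer point a write at an arbitrary byte offset.
    chunk * chunk_size
}

fn main() {
    let chunk_size: u64 = 10_000_000; // roughly the new 10 MB default below
    assert_eq!(chunk_offset(0, chunk_size), 0);
    assert_eq!(chunk_offset(3, chunk_size), 30_000_000);
}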
@@ -384,6 +384,10 @@ pub struct ChunkedInfo {
     #[serde_as(as = "serde_with::DurationSeconds<i64>")]
     pub expire_duration: TimeDelta,
 
+    /// Tracks which chunks have already been recieved, so you can't overwrite
+    /// some wrong part of a file
+    #[serde(skip)]
+    pub recieved_chunks: HashSet<u64>,
     #[serde(skip)]
     pub path: PathBuf,
     #[serde(skip)]
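The new recieved_chunks set is what makes re-sends safe: each chunk number can be written at most once, so a duplicate or replayed request cannot clobber a part of the file that already arrived. A standalone sketch of the pattern (the accept_chunk helper is hypothetical; the real bookkeeping is inline in the handler below):

use std::collections::HashSet;

fn accept_chunk(received: &mut HashSet<u64>, chunk: u64) -> Result<(), &'static str> {
    // Reject duplicates up front, mirroring the `contains` check in the handler.
    if received.contains(&chunk) {
        return Err("Chunk already uploaded");
    }
    // ... seek to chunk * chunk_size and stream the body to disk here ...
    // Record the chunk only after the write succeeds, so a failed
    // transfer can be retried with the same chunk number.
    received.insert(chunk);
    Ok(())
}

fn main() {
    let mut received = HashSet::new();
    assert!(accept_chunk(&mut received, 0).is_ok());
    assert!(accept_chunk(&mut received, 0).is_err()); // second attempt rejected
}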
src/lib.rs (27 changed lines)
@@ -141,13 +141,13 @@ pub async fn chunked_upload_start(
     }))
 }
 
-#[post("/upload/chunked/<uuid>?<offset>", data = "<data>")]
+#[post("/upload/chunked/<uuid>?<chunk>", data = "<data>")]
 pub async fn chunked_upload_continue(
     chunk_db: &State<Arc<RwLock<Chunkbase>>>,
     settings: &State<Settings>,
     data: Data<'_>,
     uuid: &str,
-    offset: u64,
+    chunk: u64,
 ) -> Result<(), io::Error> {
     let uuid = Uuid::parse_str(uuid).map_err(io::Error::other)?;
     let data_stream = data.open((settings.chunk_size + 100).bytes());
@@ -156,6 +156,9 @@ pub async fn chunked_upload_continue(
         Some(s) => s.clone(),
         None => return Err(io::Error::other("Invalid UUID")),
     };
+    if chunked_info.1.recieved_chunks.contains(&chunk) {
+        return Err(io::Error::new(ErrorKind::Other, "Chunk already uploaded"));
+    }
 
     let mut file = fs::File::options()
         .read(true)
@@ -164,22 +167,36 @@ pub async fn chunked_upload_continue(
         .open(&chunked_info.1.path)
         .await?;
 
-    if offset > chunked_info.1.size {
+    let offset = chunk * settings.chunk_size;
+    if (offset > chunked_info.1.size) | (offset > settings.max_filesize) {
         return Err(io::Error::new(
             ErrorKind::InvalidInput,
-            "The seek position is larger than the file size",
+            "Invalid chunk number for file",
         ));
     }
 
     file.seek(io::SeekFrom::Start(offset)).await?;
-    data_stream.stream_to(&mut file).await?;
+    let written = data_stream.stream_to(&mut file).await?.written;
     file.flush().await?;
     let position = file.stream_position().await?;
 
+    if written > settings.chunk_size {
+        chunk_db.write().unwrap().mut_chunks().remove(&uuid);
+        return Err(io::Error::other("Wrote more than one chunk"));
+    }
     if position > chunked_info.1.size {
         chunk_db.write().unwrap().mut_chunks().remove(&uuid);
         return Err(io::Error::other("File larger than expected"));
     }
+    chunk_db
+        .write()
+        .unwrap()
+        .mut_chunks()
+        .get_mut(&uuid)
+        .unwrap()
+        .1
+        .recieved_chunks
+        .insert(chunk);
 
     Ok(())
 }
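Taken together, the handler now enforces three invariants per chunk: the derived offset must fall inside both the declared file size and the server's max_filesize, the body must not exceed one chunk, and the file must not grow past its declared size. Distilled into a standalone function (illustrative only; the real checks are inline above and also remove the upload from the Chunkbase on failure):

fn validate_chunk(
    chunk: u64,
    written: u64,       // bytes actually streamed for this chunk
    position: u64,      // file cursor after the write
    expected_size: u64, // size the client declared at upload start
    chunk_size: u64,
    max_filesize: u64,
) -> Result<(), &'static str> {
    let offset = chunk * chunk_size;
    if offset > expected_size || offset > max_filesize {
        return Err("Invalid chunk number for file");
    }
    if written > chunk_size {
        return Err("Wrote more than one chunk");
    }
    if position > expected_size {
        return Err("File larger than expected");
    }
    Ok(())
}

Note that the route already caps the incoming stream at chunk_size + 100 bytes, so the written > chunk_size check is a backstop for a client that sends an oversized body.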
@@ -136,7 +136,6 @@ pub async fn clean_loop(
 
 pub async fn clean_chunks(chunk_db: Arc<RwLock<Chunkbase>>, mut shutdown_signal: Receiver<()>) {
     let mut interval = time::interval(TimeDelta::seconds(30).to_std().unwrap());
 
     loop {
         select! {
             _ = interval.tick() => {let _ = chunk_db.write().unwrap().delete_timed_out();},
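For context, the cleanup task shown here is the usual tokio shape: tick on an interval, sweep timed-out chunks, and bail out when a shutdown signal arrives. A self-contained sketch of the same shape (this assumes a tokio broadcast receiver; the real delete_timed_out logic and Chunkbase are elided):

use std::time::Duration;
use tokio::{select, sync::broadcast, time};

async fn clean_chunks_sketch(mut shutdown_signal: broadcast::Receiver<()>) {
    let mut interval = time::interval(Duration::from_secs(30));
    loop {
        select! {
            // Every 30 seconds, sweep expired chunked uploads.
            _ = interval.tick() => { /* chunk_db.write().unwrap().delete_timed_out() */ },
            // Stop sweeping once the server begins shutting down.
            _ = shutdown_signal.recv() => break,
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = broadcast::channel(1);
    let task = tokio::spawn(clean_chunks_sketch(rx));
    let _ = tx.send(()); // trigger shutdown immediately for the demo
    let _ = task.await;
}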
@@ -11,38 +11,31 @@ use serde_with::serde_as;
 
 /// A response to the client from the server
 #[derive(Deserialize, Serialize, Debug)]
-#[serde(crate = "rocket::serde")]
+#[serde(default)]
 pub struct Settings {
     /// Maximum filesize in bytes
-    #[serde(default)]
     pub max_filesize: u64,
 
     /// Maximum filesize in bytes
-    #[serde(default)]
     pub chunk_size: u64,
 
     /// Is overwiting already uploaded files with the same hash allowed, or is
     /// this a no-op?
-    #[serde(default)]
     pub overwrite: bool,
 
     /// Settings pertaining to duration information
     pub duration: DurationSettings,
 
     /// The path to the database file
-    #[serde(default)]
     pub database_path: PathBuf,
 
     /// Temporary directory for stuff
-    #[serde(default)]
     pub temp_dir: PathBuf,
 
     /// Directory in which to store hosted files
-    #[serde(default)]
     pub file_dir: PathBuf,
 
     /// Settings pertaining to the server configuration
-    #[serde(default)]
     pub server: ServerSettings,
 
     #[serde(skip)]
@@ -53,7 +46,7 @@ impl Default for Settings {
     fn default() -> Self {
         Self {
             max_filesize: 25.megabytes().into(), // 1 MB
-            chunk_size: 1.megabytes().into(),
+            chunk_size: 10.megabytes().into(),
             overwrite: true,
             duration: DurationSettings::default(),
             server: ServerSettings::default(),
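One subtlety in the settings change: moving #[serde(default)] from the individual fields to the container means missing fields are now filled from the struct's own Default impl (the one shown above, with the 10 MB chunk_size), rather than from each field type's zero-ish default. A minimal sketch of the container-level behavior, assuming the serde and toml crates (the Config type here is hypothetical):

use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(default)] // any missing field falls back to Config::default()
struct Config {
    chunk_size: u64,
    overwrite: bool,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            chunk_size: 10_000_000,
            overwrite: true,
        }
    }
}

fn main() {
    // `overwrite` is absent, so it comes from Config::default() (true),
    // not from bool::default() (false) as a per-field attribute would give.
    let config: Config = toml::from_str("chunk_size = 1234").unwrap();
    assert_eq!(config.chunk_size, 1234);
    assert!(config.overwrite);
}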
@@ -114,10 +114,11 @@ async function uploadFile(file, duration, maxSize) {
     // Upload the file in `chunk_size` chunks
     const chunkUploads = new Set();
     const progressValues = [];
-    const concurrencyLimit = 4;
-    for (let start = 0; start < file.size; start += chunkedResponse.chunk_size) {
-        const chunk = file.slice(start, start + chunkedResponse.chunk_size)
-        const url = "/upload/chunked/" + chunkedResponse.uuid + "?offset=" + start;
+    const concurrencyLimit = 5;
+    for (let chunk_num = 0; chunk_num < Math.floor(file.size / chunkedResponse.chunk_size) + 1; chunk_num ++) {
+        const offset = Math.floor(chunk_num * chunkedResponse.chunk_size);
+        const chunk = file.slice(offset, offset + chunkedResponse.chunk_size);
+        const url = "/upload/chunked/" + chunkedResponse.uuid + "?chunk=" + chunk_num;
         const ID = progressValues.push(0);
 
         let upload = new Promise(function (resolve, reject) {
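On the client side the loop arithmetic is the mirror image: it issues Math.floor(file.size / chunk_size) + 1 requests so the trailing partial chunk is included, and derives each slice's offset from its chunk number. The same arithmetic expressed in Rust (illustrative only; the real client is the JavaScript above):

fn chunk_count(file_size: u64, chunk_size: u64) -> u64 {
    // floor(file_size / chunk_size) + 1, as in the JavaScript loop bound.
    file_size / chunk_size + 1
}

fn main() {
    let chunk_size: u64 = 10_000_000;
    // 25 MB file: chunks 0 and 1 are full, chunk 2 carries the last 5 MB.
    assert_eq!(chunk_count(25_000_000, chunk_size), 3);
    // Exact multiples still get one extra trailing chunk; file.slice() past
    // the end of a Blob just yields an empty slice, so nothing is written.
    assert_eq!(chunk_count(20_000_000, chunk_size), 3);
}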