mirror of
https://github.com/Dangoware/confetti-box.git
synced 2025-04-19 15:22:57 -05:00
Improved performance by properly limiting the number of uploads
This commit is contained in:
parent bf17b1bc04
commit ca63739bfb
2 changed files with 33 additions and 25 deletions
src/lib.rs | 10

@@ -203,7 +203,7 @@ impl ChunkedResponse {
 
 /// Start a chunked upload. Response contains all the info you need to continue
 /// uploading chunks.
-#[post("/upload/chunked", data = "<file_info>", rank = 2)]
+#[post("/upload/chunked", data = "<file_info>")]
 pub async fn chunked_upload_start(
     db: &State<Arc<RwLock<Chunkbase>>>,
     settings: &State<Settings>,
@@ -238,11 +238,11 @@ pub async fn chunked_upload_start(
     }))
 }
 
-#[post("/upload/chunked?<uuid>&<offset>", data = "<data>", rank = 1)]
+#[post("/upload/chunked/<uuid>?<offset>", data = "<data>")]
 pub async fn chunked_upload_continue(
     chunk_db: &State<Arc<RwLock<Chunkbase>>>,
     data: Data<'_>,
-    uuid: String,
+    uuid: &str,
     offset: u64,
 ) -> Result<(), io::Error> {
     let uuid = Uuid::parse_str(&uuid).map_err(|e| io::Error::other(e))?;
@@ -285,12 +285,12 @@ pub async fn chunked_upload_continue(
 }
 
 /// Finalize a chunked upload
-#[get("/upload/chunked?<uuid>&finish", rank = 3)]
+#[get("/upload/chunked/<uuid>?finish")]
 pub async fn chunked_upload_finish(
     main_db: &State<Arc<RwLock<Mochibase>>>,
     chunk_db: &State<Arc<RwLock<Chunkbase>>>,
     settings: &State<Settings>,
-    uuid: String,
+    uuid: &str,
 ) -> Result<Json<MochiFile>, io::Error> {
     let now = Utc::now();
     let uuid = Uuid::parse_str(&uuid).map_err(|e| io::Error::other(e))?;
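For reference, these route changes move the chunk UUID out of the query string and into the path, so the explicit rank fallbacks are no longer needed to disambiguate the three routes. Below is a minimal client sketch of the resulting flow. It is an illustration only: the exact file_info body accepted by chunked_upload_start is not shown in this diff, so the JSON shape and the helper name are assumptions; the uuid and chunk_size fields of ChunkedResponse are the ones the frontend code below relies on.

// Sketch of the reworked chunked-upload flow; the file_info JSON shape is assumed.
async function chunkedUploadSketch(file) {
    // 1. Start: POST the file metadata; the server replies with a ChunkedResponse.
    const start = await fetch("/upload/chunked", {
        method: "POST",
        body: JSON.stringify({ name: file.name, size: file.size }), // assumed shape
    });
    const { uuid, chunk_size } = await start.json();

    // 2. Continue: POST each chunk to the new path-based route with its byte offset.
    for (let offset = 0; offset < file.size; offset += chunk_size) {
        await fetch("/upload/chunked/" + uuid + "?offset=" + offset, {
            method: "POST",
            body: file.slice(offset, offset + chunk_size),
        });
    }

    // 3. Finish: GET the finalize route; the response is the completed MochiFile record.
    return await fetch("/upload/chunked/" + uuid + "?finish");
}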
@@ -58,24 +58,25 @@ async function pasteSubmit(evt) {
 }
 
 async function sendFiles(files, duration, maxSize) {
-    const uploadArray = [];
+    const inProgressUploads = new Set();
     const concurrencyLimit = 10;
 
     for (const file of files) {
-        // Add each upload to the array
-        uploadArray.push(uploadFile(file, duration, maxSize));
+        // Start the upload and add it to the set of in-progress uploads
+        const uploadPromise = uploadFile(file, duration, maxSize);
+        inProgressUploads.add(uploadPromise);
 
-        // If the number of uploads reaches the concurrency limit, wait for them to finish
-        if (uploadArray.length >= concurrencyLimit) {
-            await Promise.allSettled(uploadArray);
-            uploadArray.length = 0; // Clear the array after each batch
+        // Once an upload finishes, remove it from the set
+        uploadPromise.finally(() => inProgressUploads.delete(uploadPromise));
+
+        // If we reached the concurrency limit, wait for one of the uploads to complete
+        if (inProgressUploads.size >= concurrencyLimit) {
+            await Promise.race(inProgressUploads);
         }
     }
 
-    // Final batch to handle any remaining files
-    if (uploadArray.length > 0) {
-        await Promise.allSettled(uploadArray);
-    }
+    // Wait for any remaining uploads to complete
+    await Promise.allSettled(inProgressUploads);
 }
 
 async function uploadFile(file, duration, maxSize) {
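The sendFiles change above is a sliding-window concurrency limiter: a Set tracks in-flight promises, each promise removes itself on settlement via finally, Promise.race blocks once the window is full, and Promise.allSettled drains whatever is still running at the end. As a standalone sketch (not code from this repository), the same idea generalizes to any task list:

// Generic sliding-window limiter using the same Set + Promise.race idea.
async function runWithLimit(items, limit, startTask) {
    const inFlight = new Set();
    for (const item of items) {
        const task = startTask(item);
        inFlight.add(task);
        task.finally(() => inFlight.delete(task));
        // Block until a slot frees up once the window is full.
        if (inFlight.size >= limit) {
            await Promise.race(inFlight);
        }
    }
    // Drain the tail end of the window.
    await Promise.allSettled(inFlight);
}

// sendFiles could then be written as:
// await runWithLimit(files, 10, (file) => uploadFile(file, duration, maxSize));

Compared with the old batching approach (fill an array of 10, allSettled, clear, repeat), this keeps the pipeline full: a new upload starts as soon as any one finishes, instead of every batch waiting on its slowest member.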
@@ -108,11 +109,12 @@ async function uploadFile(file, duration, maxSize) {
     }
 
     // Upload the file in `chunk_size` chunks
-    const chunkUploadArray = [];
+    const chunkUploads = new Set();
     const progressValues = [];
+    const concurrencyLimit = 4;
     for (let start = 0; start < file.size; start += chunkedResponse.chunk_size) {
         const chunk = file.slice(start, start + chunkedResponse.chunk_size)
-        const url = "/upload/chunked?uuid=" + chunkedResponse.uuid + "&offset=" + start;
+        const url = "/upload/chunked/" + chunkedResponse.uuid + "?offset=" + start;
         const ID = progressValues.push(0);
 
         let upload = new Promise(function (resolve, reject) {
@@ -122,23 +124,29 @@ async function uploadFile(file, duration, maxSize) {
                 (p) => {uploadProgress(p, progressBar, progressText, progressValues, file.size, ID);}, true
             );
 
-            request.onload = () => {
-                if (this.status >= 200 && this.status < 300) {
+            request.onload = (e) => {
+                if (e.target.status >= 200 && e.target.status < 300) {
                     resolve(request.response);
                 } else {
-                    reject({status: this.status, statusText: request.statusText});
+                    reject({status: e.target.status, statusText: request.statusText});
                 }
             };
-            request.onerror = () => reject({status: this.status, statusText: request.statusText});
+            request.onerror = (e) => {
+                reject({status: e.target.status, statusText: request.statusText})
+            };
             request.send(chunk);
         });
 
-        chunkUploadArray.push(upload);
+        chunkUploads.add(upload);
+        upload.finally(() => chunkUploads.delete(upload));
+        if (chunkUploads.size >= concurrencyLimit) {
+            await Promise.race(chunkUploads);
+        }
     }
-    await Promise.allSettled(chunkUploadArray);
+    await Promise.allSettled(chunkUploads);
 
     // Finish the request and update the progress box
-    const result = await fetch("/upload/chunked?uuid=" + chunkedResponse.uuid + "&finish");
+    const result = await fetch("/upload/chunked/" + chunkedResponse.uuid + "?finish");
     uploadComplete(result, progressBar, progressText, linkRow);
 }
 
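One more fix buried in the last hunk: the old onload and onerror handlers were arrow functions reading this.status, but an arrow function does not get this bound to the XMLHttpRequest, so the check compared against the enclosing scope's status rather than the HTTP status. Reading e.target.status, as the commit now does (request.status would work equally well), fixes that. A minimal illustration, with a placeholder URL:

const request = new XMLHttpRequest();
request.open("GET", "/example"); // placeholder URL

// Broken: `this` in an arrow function is the surrounding scope, not the XHR,
// so `this.status` never reflects the HTTP status.
request.onload = () => console.log(this.status);

// Fixed, as in this commit: read the request off the event object.
request.onload = (e) => console.log(e.target.status);

request.send();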