improve zip downloads
@@ -82,7 +82,7 @@ impl FileName {
 }
 
 /// A valid file type
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Display)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Display)]
 #[serde(rename_all = "camelCase")]
 pub enum FileType {
     File,
@@ -267,23 +267,27 @@ impl From<AbsoluteFilePath> for FilePath {
 pub type FileStreamInner =
     Box<dyn Stream<Item = Result<Bytes, std::io::Error>> + Send + Sync + Unpin + 'static>;
 
-pub struct FileStream(FileStreamInner);
+pub struct FileStream(FileType, FileStreamInner);
 
 impl FileStream {
-    pub fn new<S>(stream: S) -> Self
+    pub fn new<S>(file_type: FileType, stream: S) -> Self
     where
         S: Stream<Item = Result<Bytes, std::io::Error>> + Send + Sync + Unpin + 'static,
     {
-        Self(Box::new(stream))
+        Self(file_type, Box::new(stream))
+    }
+
+    pub fn file_type(&self) -> FileType {
+        self.0
     }
 
     pub fn stream(&self) -> &FileStreamInner {
-        &self.0
+        &self.1
     }
 }
 
 impl From<FileStream> for FileStreamInner {
     fn from(value: FileStream) -> Self {
-        value.0
+        value.1
     }
 }

@@ -1,6 +1,6 @@
 use axum::{
-    body::Body,
     extract::{Query, State},
+    response::IntoResponse,
 };
 use serde::Deserialize;
 use thiserror::Error;
@@ -74,13 +74,13 @@ pub async fn cat_share<WS: WarrenService, AS: AuthService>(
     State(state): State<AppState<WS, AS>>,
     SharePasswordHeader(password): SharePasswordHeader,
     Query(request): Query<ShareCatHttpRequestBody>,
-) -> Result<Body, ApiError> {
+) -> Result<impl IntoResponse, ApiError> {
     let domain_request = request.try_into_domain(password)?;
 
     state
         .warren_service
         .warren_share_cat(domain_request)
         .await
-        .map(|response| FileStream::from(response).into())
+        .map(|response| FileStream::from(response))
         .map_err(ApiError::from)
 }

@@ -16,8 +16,9 @@ mod warren_rm;
 
 use axum::{
     Router,
-    body::Body,
     extract::DefaultBodyLimit,
+    http::{self, HeaderValue, Response},
+    response::IntoResponse,
     routing::{get, post},
 };
 
@@ -68,10 +69,23 @@ impl From<&File> for WarrenFileElement {
     }
 }
 
-impl From<FileStream> for Body {
-    fn from(value: FileStream) -> Self {
-        let inner: FileStreamInner = value.into();
-        Body::from_stream(inner)
+impl IntoResponse for FileStream {
+    fn into_response(self) -> axum::response::Response {
+        let mut builder = Response::builder().header(http::header::TRANSFER_ENCODING, "chunked");
+
+        if let Some(headers) = builder.headers_mut() {
+            if self.file_type() == FileType::Directory {
+                headers.insert(
+                    http::header::CONTENT_TYPE,
+                    HeaderValue::from_str("application/zip").unwrap(),
+                );
+            }
+
+            headers.remove(http::header::CONTENT_LENGTH);
+        }
+
+        let inner: FileStreamInner = self.into();
+        builder.body(axum::body::Body::from_stream(inner)).unwrap()
     }
 }
 

@@ -1,6 +1,6 @@
 use axum::{
-    body::Body,
     extract::{Query, State},
+    response::IntoResponse,
 };
 use serde::Deserialize;
 use thiserror::Error;
@@ -62,13 +62,12 @@ pub async fn fetch_file<WS: WarrenService, AS: AuthService>(
     State(state): State<AppState<WS, AS>>,
     SessionIdHeader(session): SessionIdHeader,
     Query(request): Query<WarrenCatHttpRequestBody>,
-) -> Result<Body, ApiError> {
+) -> Result<impl IntoResponse, ApiError> {
     let domain_request = AuthRequest::new(session, request.try_into_domain()?);
 
     state
         .auth_service
         .auth_warren_cat(domain_request, state.warren_service.as_ref())
         .await
-        .map(|contents| contents.into())
         .map_err(ApiError::from)
 }

@@ -225,17 +225,16 @@ impl FileSystem {
 
             let (sync_tx, sync_rx) =
                 std::sync::mpsc::channel::<Result<bytes::Bytes, std::io::Error>>();
-            let (tx, rx) =
-                tokio::sync::mpsc::channel::<Result<bytes::Bytes, std::io::Error>>(65536);
+            let (tx, rx) = tokio::sync::mpsc::channel::<Result<bytes::Bytes, std::io::Error>>(1024);
 
+            tokio::task::spawn(create_zip(path, sync_tx));
             tokio::task::spawn(async move {
                 while let Ok(v) = sync_rx.recv() {
                     let _ = tx.send(v).await;
                 }
            });
-            tokio::task::spawn(create_zip(path, sync_tx));
 
-            let stream = FileStream::new(ReceiverStream::new(rx));
+            let stream = FileStream::new(FileType::Directory, ReceiverStream::new(rx));
 
             return Ok(stream);
         }
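
The hunk above bridges a blocking producer (the zip writer feeding a std::sync::mpsc channel) into an async stream by forwarding chunks into a bounded tokio channel. Below is a minimal self-contained sketch of that bridge pattern; the function name is hypothetical, the 1024-chunk capacity mirrors the commit, and the forwarder is moved onto tokio's blocking pool (spawn_blocking with blocking_send) instead of the plain async task used in the hunk, since std's recv() is a blocking call.

    use tokio_stream::wrappers::ReceiverStream;

    // Sketch only: bridge a blocking producer into an async Stream of Bytes.
    fn bridge_blocking_producer() -> ReceiverStream<Result<bytes::Bytes, std::io::Error>> {
        let (sync_tx, sync_rx) = std::sync::mpsc::channel::<Result<bytes::Bytes, std::io::Error>>();
        // Bounded channel: the capacity provides backpressure against a fast producer.
        let (tx, rx) = tokio::sync::mpsc::channel::<Result<bytes::Bytes, std::io::Error>>(1024);

        // The blocking producer (in the commit, the zip writer holding sync_tx)
        // runs elsewhere; a plain thread stands in for it here.
        std::thread::spawn(move || {
            let _ = sync_tx.send(Ok(bytes::Bytes::from_static(b"chunk")));
        });

        // Forwarder: sync_rx.recv() blocks, so it runs on the blocking pool and
        // pushes into the async channel with blocking_send.
        tokio::task::spawn_blocking(move || {
            while let Ok(item) = sync_rx.recv() {
                if tx.blocking_send(item).is_err() {
                    break; // the HTTP client went away; stop forwarding
                }
            }
        });

        ReceiverStream::new(rx)
    }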
@@ -246,7 +245,7 @@ impl FileSystem {
             bail!("File size exceeds configured limit");
         }
 
-        let stream = FileStream::new(ReaderStream::new(file));
+        let stream = FileStream::new(FileType::File, ReaderStream::new(file));
 
         Ok(stream)
     }
@@ -469,13 +468,17 @@ where
     P: AsRef<Path>,
 {
     let options = zip::write::SimpleFileOptions::default()
-        .compression_method(zip::CompressionMethod::Deflated)
+        .compression_method(zip::CompressionMethod::Stored)
+        .compression_level(None)
+        .large_file(true)
         .unix_permissions(0o644);
 
     let mut file_buf = Vec::new();
     let mut zip = zip::write::ZipWriter::new_stream(ChannelWriter(tx));
 
-    for entry_path_buf in walk_dir(&path).await? {
+    let entries = walk_dir(&path).await?;
+
+    for entry_path_buf in entries {
         let entry_path = entry_path_buf.as_path();
         let entry_str = entry_path
             .strip_prefix(&path)?
@@ -489,12 +492,14 @@ where
 
         zip.start_file(entry_str, options)?;
         let mut entry_file = tokio::fs::File::open(entry_path).await?;
+
        entry_file.read_to_end(&mut file_buf).await?;
+
        zip.write_all(&file_buf)?;
        file_buf.clear();
    }
 
-    drop(zip.finish()?);
+    zip.finish()?;
 
    Ok(())
 }
@@ -504,7 +509,7 @@ struct ChannelWriter(std::sync::mpsc::Sender<Result<bytes::Bytes, std::io::Error
 impl Write for ChannelWriter {
     fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
         let len = buf.len();
-        let data = bytes::Bytes::copy_from_slice(&buf[..(len)]);
+        let data = bytes::Bytes::copy_from_slice(buf);
 
         self.0
             .send(Ok(data))
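
The final hunk ends inside write(), right after .send(Ok(data)). For orientation, a Write adapter over an mpsc sender typically completes along the lines below; this is a sketch under assumptions (the BrokenPipe error mapping and the no-op flush are illustrative, not the commit's exact code).

    use std::io::Write;

    // Sketch: forward every write as an owned Bytes chunk into the channel.
    struct ChannelWriter(std::sync::mpsc::Sender<Result<bytes::Bytes, std::io::Error>>);

    impl Write for ChannelWriter {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            let len = buf.len();
            let data = bytes::Bytes::copy_from_slice(buf);

            self.0
                .send(Ok(data))
                .map_err(|_| std::io::Error::new(std::io::ErrorKind::BrokenPipe, "stream receiver dropped"))?;

            Ok(len)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            // Nothing is buffered locally; chunks are forwarded as they arrive.
            Ok(())
        }
    }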