add ttl configuration for transcoding results
This commit is contained in:
parent
27596adef7
commit
f78333c24c
@ -34,6 +34,7 @@ You can change configuration using this environment variables:
|
||||
- `TEMP_DIR` - changes the directory used to store incoming downloads and transcoding results. Useful if you want to use a Docker volume for this. Default is the system temp directory (`/tmp` on Linux).
|
||||
- `LOG_LEVEL` - changes log verbosity, default is `info`.
|
||||
- `MAX_BODY_SIZE` - changes max body size for `/enqueue`. Default is 100MB.
|
||||
- `RESULT_TTL_SEC` - sets the result TTL in seconds (minimum 60). Default is 3600 (transcoding results are kept and can be downloaded for one hour).
|
||||
- `FFMPEG_VERBOSE` - if set to `1`, changes the FFmpeg log level from quiet to trace.
|
||||
|
||||
# Roadmap
|
||||
|
@ -11,6 +11,8 @@ mod task;
|
||||
mod thread_pool;
|
||||
mod transcoder;
|
||||
|
||||
const WORK_DIR_IN_OUT_LIFETIME: u64 = 60 * 60;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
tracing_subscriber::fmt()
|
||||
@ -32,7 +34,12 @@ async fn main() {
|
||||
.unwrap()
|
||||
});
|
||||
Server::new(pool, temp_dir)
|
||||
.start_cleanup_task()
|
||||
.start_cleanup_task(
|
||||
env::var("RESULT_TTL_SEC")
|
||||
.ok()
|
||||
.and_then(|val| val.parse::<u64>().ok())
|
||||
.map_or(WORK_DIR_IN_OUT_LIFETIME, |val| val),
|
||||
)
|
||||
.serve(&addr)
|
||||
.await
|
||||
.expect("Cannot bind the addr")
|
||||
|
@ -29,7 +29,6 @@ use tokio::io::AsyncReadExt;
|
||||
use tokio_util::io::ReaderStream;
|
||||
|
||||
const CONTENT_LENGTH_LIMIT: usize = 100 * 1024 * 1024;
|
||||
const WORK_DIR_IN_OUT_LIFETIME: u64 = 60 * 60;
|
||||
|
||||
pub struct Server {
|
||||
thread_pool: Arc<ThreadPool>,
|
||||
@ -48,14 +47,14 @@ impl Server {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn start_cleanup_task(self) -> Self {
|
||||
pub fn start_cleanup_task(self, ttl: u64) -> Self {
|
||||
let dir_path = self.work_dir.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut interval = interval(Duration::from_secs(60));
|
||||
loop {
|
||||
interval.tick().await;
|
||||
|
||||
if let Err(err) = cleanup_directory(dir_path.as_str()).await {
|
||||
if let Err(err) = cleanup_directory(dir_path.as_str(), ttl).await {
|
||||
error!("could not perform working directory cleanup: {}", err);
|
||||
}
|
||||
}
|
||||
@ -183,7 +182,7 @@ fn error_response(msg: &str) -> (StatusCode, Json<ConvertResponse>) {
|
||||
)
|
||||
}
|
||||
|
||||
async fn cleanup_directory(dir_path: &str) -> Result<(), Box<dyn std::error::Error>> {
|
||||
async fn cleanup_directory(dir_path: &str, ttl: u64) -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Get the current time
|
||||
let now = SystemTime::now();
|
||||
|
||||
@ -213,7 +212,7 @@ async fn cleanup_directory(dir_path: &str) -> Result<(), Box<dyn std::error::Err
|
||||
let duration_since_modified = now.duration_since(modified_time)?;
|
||||
|
||||
// If the file is older than one hour, remove it
|
||||
if duration_since_modified > Duration::from_secs(WORK_DIR_IN_OUT_LIFETIME) {
|
||||
if duration_since_modified > Duration::from_secs(ttl) {
|
||||
fs::remove_file(file_path.clone()).await?;
|
||||
debug!("removed file: {:?}", file_path);
|
||||
}
|
||||
|
14
src/task.rs
14
src/task.rs
@ -203,7 +203,12 @@ fn send_error(
|
||||
if response.status() == 200 {
|
||||
Ok(response)
|
||||
} else {
|
||||
Err(format!("failed to send callback to {}. Status: {}", url, response.status()).into())
|
||||
Err(format!(
|
||||
"failed to send callback to {}. Status: {}",
|
||||
url,
|
||||
response.status()
|
||||
)
|
||||
.into())
|
||||
}
|
||||
}
|
||||
|
||||
@ -218,7 +223,12 @@ fn send_ok(id: uuid::Uuid, url: &str) -> Result<Response, Box<dyn std::error::Er
|
||||
if response.status() == 200 {
|
||||
Ok(response)
|
||||
} else {
|
||||
Err(format!("failed to send callback to {}. Status: {}", url, response.status()).into())
|
||||
Err(format!(
|
||||
"failed to send callback to {}. Status: {}",
|
||||
url,
|
||||
response.status()
|
||||
)
|
||||
.into())
|
||||
}
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user