Compare commits

..

2 Commits

Author SHA1 Message Date
7e8cffa07f add X-Task-Id for upload 2024-05-26 16:32:12 +03:00
5219908242 tmp: remove arm64 support 2024-05-26 15:44:26 +03:00
6 changed files with 13 additions and 14 deletions

View File

@@ -26,5 +26,5 @@ jobs:
uses: docker/build-push-action@v5
with:
push: true
platforms: linux/amd64,linux/arm64
platforms: linux/amd64
tags: neur0toxine/atranscoder-rpc:latest

View File

@@ -21,7 +21,7 @@ curl --location 'http://localhost:8090/enqueue' \
--form 'channelLayout="mono"' \
--form 'uploadUrl="http://127.0.0.1:8909/upload"'
```
3. Your `uploadUrl` will receive JSON response with job ID and error in case of failure and the entire transcoded file contents in case of success. Use `Content-Type` header to differentiate between the two data types.
3. Your `uploadUrl` will receive a JSON response with the job ID and an error in case of failure, or the entire transcoded file contents in case of success (the success request will have an `X-Task-Id` header with the task ID). Use the `Content-Type` header to differentiate between the two data types.
You can change the configuration using these environment variables:
- `LISTEN` - change this environment variable to change TCP listen address. Default is `0.0.0.0:8090`.
@@ -35,7 +35,7 @@ You can change configuration using this environment variables:
- [x] Remove input file after transcoding it.
- [x] Implement file upload to `uploadUrl` (if `Content-Type: application/json` then conversion was not successful and body contains an error info).
- [x] Remove transcoding result after uploading it to the `uploadUrl`.
- [x] Docker image for `amd64` and `aarch64`.
- [ ] Docker image for `amd64` and `arm64` (currently only `amd64` is supported).
- [ ] ~~Restart threads in case of panic.~~ It's better to not panic. Current error handling seems ok for now.
- [ ] ~~Statically linked binary for Docker image & result docker image based on `scratch` (reduce image size).~~ Not yet, see [Dockerfile.scratch](Dockerfile.scratch).
- [ ] Tests!

View File

@@ -2,11 +2,6 @@ use axum_typed_multipart::{FieldData, TryFromMultipart};
use serde::{Deserialize, Serialize};
use tempfile::NamedTempFile;
#[derive(Serialize, Deserialize)]
pub struct Error {
pub error: String,
}
#[derive(Serialize, Deserialize)]
pub struct ConvertResponse {
pub id: Option<String>,

View File

@@ -166,4 +166,4 @@ async fn cleanup_directory(dir_path: &str) -> Result<(), Box<dyn std::error::Err
}
Ok(())
}
}

View File

@@ -36,7 +36,11 @@ impl Task {
std::fs::remove_file(Path::new(&self.params.input_path)).ok();
if let Err(err) = upload_file(&self.params.output_path, &self.params.upload_url) {
if let Err(err) = upload_file(
&self.id.to_string(),
&self.params.output_path,
&self.params.upload_url,
) {
error!(
"couldn't upload result for job id={}, file path {}: {}",
&self.id.to_string(),
@@ -211,6 +215,7 @@ fn send_error(
}
fn upload_file<P: AsRef<Path>>(
id: &str,
file_path: P,
url: &str,
) -> Result<Response, Box<dyn std::error::Error>> {
@@ -233,6 +238,7 @@ fn upload_file<P: AsRef<Path>>(
"Content-Disposition",
&format!("attachment; filename=\"{}\"", file_name),
)
.set("X-Task-Id", id)
.send_bytes(&buffer)?;
if response.status() == 200 {

View File

@@ -240,7 +240,7 @@ fn filter_graph(
};
filter.add(&abuffersink_filter, "out", "")?;
let mut out = match filter.get("out") {
let mut out = match filter.get("out") {
Some(filter) => filter,
None => return Err(ffmpeg::Error::Unknown),
};
@@ -257,9 +257,7 @@ fn filter_graph(
.contains(codec::capabilities::Capabilities::VARIABLE_FRAME_SIZE)
{
if let Some(mut out_filter) = filter.get("out") {
out_filter
.sink()
.set_frame_size(encoder.frame_size());
out_filter.sink().set_frame_size(encoder.frame_size());
}
}
}