Compare commits

..

No commits in common. "7e8cffa07f230001328c4d5e1f02617f9d81f96e" and "a5136c38d44bc199e054c07f960b9e748a8a803d" have entirely different histories.

6 changed files with 14 additions and 13 deletions

View File

@ -26,5 +26,5 @@ jobs:
uses: docker/build-push-action@v5
with:
push: true
platforms: linux/amd64
platforms: linux/amd64,linux/arm64
tags: neur0toxine/atranscoder-rpc:latest

View File

@ -21,7 +21,7 @@ curl --location 'http://localhost:8090/enqueue' \
--form 'channelLayout="mono"' \
--form 'uploadUrl="http://127.0.0.1:8909/upload"'
```
3. Your `uploadUrl` will receive JSON response with job ID and error in case of failure and the entire transcoded file contents in case of success (success request will have `X-Task-Id` header with task ID). Use `Content-Type` header to differentiate between the two data types.
3. Your `uploadUrl` will receive a JSON response with the job ID and error in case of failure, and the entire transcoded file contents in case of success. Use the `Content-Type` header to differentiate between the two data types.
You can change the configuration using these environment variables:
- `LISTEN` - change this environment variable to change TCP listen address. Default is `0.0.0.0:8090`.
@ -35,7 +35,7 @@ You can change configuration using this environment variables:
- [x] Remove input file after transcoding it.
- [x] Implement file upload to `uploadUrl` (if `Content-Type: application/json` then conversion was not successful and body contains an error info).
- [x] Remove transcoding result after uploading it to the `uploadUrl`.
- [ ] Docker image for `amd64` and `arm64` (currently only `amd64` is supported).
- [x] Docker image for `amd64` and `aarch64`.
- [ ] ~~Restart threads in case of panic.~~ It's better to not panic. Current error handling seems ok for now.
- [ ] ~~Statically linked binary for Docker image & result docker image based on `scratch` (reduce image size).~~ Not yet, see [Dockerfile.scratch](Dockerfile.scratch).
- [ ] Tests!

View File

@ -2,6 +2,11 @@ use axum_typed_multipart::{FieldData, TryFromMultipart};
use serde::{Deserialize, Serialize};
use tempfile::NamedTempFile;
#[derive(Serialize, Deserialize)]
pub struct Error {
pub error: String,
}
#[derive(Serialize, Deserialize)]
pub struct ConvertResponse {
pub id: Option<String>,

View File

@ -166,4 +166,4 @@ async fn cleanup_directory(dir_path: &str) -> Result<(), Box<dyn std::error::Err
}
Ok(())
}
}

View File

@ -36,11 +36,7 @@ impl Task {
std::fs::remove_file(Path::new(&self.params.input_path)).ok();
if let Err(err) = upload_file(
&self.id.to_string(),
&self.params.output_path,
&self.params.upload_url,
) {
if let Err(err) = upload_file(&self.params.output_path, &self.params.upload_url) {
error!(
"couldn't upload result for job id={}, file path {}: {}",
&self.id.to_string(),
@ -215,7 +211,6 @@ fn send_error(
}
fn upload_file<P: AsRef<Path>>(
id: &str,
file_path: P,
url: &str,
) -> Result<Response, Box<dyn std::error::Error>> {
@ -238,7 +233,6 @@ fn upload_file<P: AsRef<Path>>(
"Content-Disposition",
&format!("attachment; filename=\"{}\"", file_name),
)
.set("X-Task-Id", id)
.send_bytes(&buffer)?;
if response.status() == 200 {

View File

@ -240,7 +240,7 @@ fn filter_graph(
};
filter.add(&abuffersink_filter, "out", "")?;
let mut out = match filter.get("out") {
let mut out = match filter.get("out") {
Some(filter) => filter,
None => return Err(ffmpeg::Error::Unknown),
};
@ -257,7 +257,9 @@ fn filter_graph(
.contains(codec::capabilities::Capabilities::VARIABLE_FRAME_SIZE)
{
if let Some(mut out_filter) = filter.get("out") {
out_filter.sink().set_frame_size(encoder.frame_size());
out_filter
.sink()
.set_frame_size(encoder.frame_size());
}
}
}