add basic backend

This commit is contained in:
2025-07-21 17:08:30 +01:00
parent d0ca82972f
commit 50d094be8f
14 changed files with 2714 additions and 2 deletions

1
backend/.dockerignore Normal file
View File

@ -0,0 +1 @@
target/

1
backend/.gitignore vendored
View File

@ -1 +1,2 @@
/target
/logs

2415
backend/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@ -4,3 +4,29 @@ version = "0.1.0"
edition = "2024"
[dependencies]
axum = "0.8.4"
clap = { version = "4.5.41", features = ["derive"] }
http = "1.3.1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0.141"
tokio = { version = "1.0", features = ["full"] }
tower = { version = "0.5.2", features = ["full"] }
tower-http = { version = "0.6.6", features = ["timeout", "trace"] }
tower-http-util = "0.1.0"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
uuid = { version = "1.17.0", features = ["serde", "v7"] }
[dev-dependencies]
reqwest = { version = "0.12.22", features = ["json"] }
[lib]
path = "src/lib.rs"
[[bin]]
path = "src/main.rs"
name = "nuchat"
[features]
default = []
shutdown = []

26
backend/Dockerfile Normal file
View File

@ -0,0 +1,26 @@
# Base stage: Rust toolchain plus cargo-chef (dependency-layer caching) and
# sccache (compiler-output caching) shared by the planner and builder stages.
FROM rust:1.87 AS base
RUN cargo install --locked cargo-chef sccache
ENV RUSTC_WRAPPER=sccache SCCACHE_DIR=/sccache

# Planner stage: derive a dependency "recipe" from the manifests so the
# builder can compile dependencies without the app sources invalidating them.
FROM base AS planner
WORKDIR /app
COPY . .
RUN cargo chef prepare --recipe-path recipe.json

# Builder stage: cook (compile) dependencies from the recipe first — this
# layer is reused as long as dependencies don't change — then copy the
# sources and build the application on top.
FROM base AS builder
WORKDIR /app
COPY --from=planner /app/recipe.json recipe.json
RUN --mount=type=cache,target=/usr/local/cargo/registry \
    --mount=type=cache,target=/usr/local/cargo/git \
    --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \
    cargo chef cook --release --recipe-path recipe.json
COPY . .
RUN --mount=type=cache,target=/usr/local/cargo/registry \
    --mount=type=cache,target=/usr/local/cargo/git \
    --mount=type=cache,target=$SCCACHE_DIR,sharing=locked \
    cargo build --release

# Runtime stage: minimal distroless image with C/C++ runtime libs only,
# carrying just the release binary.
FROM gcr.io/distroless/cc-debian12 AS runner
WORKDIR /app
COPY --from=builder /app/target/release/nuchat .
ENTRYPOINT [ "/app/nuchat" ]

17
backend/Justfile Normal file
View File

@ -0,0 +1,17 @@
# Build the backend with the admin shutdown endpoint compiled in.
build:
    cargo build --features shutdown

run:
    cargo run --features shutdown

# Run the server, logging BOTH stdout and stderr to the log file.
# The redirections must read `> file 2>&1` — the reverse order
# (`2>&1 > file`) duplicates stderr to the terminal before stdout is
# redirected, so errors never reach the log.
start:
    cargo run --features shutdown > logs/nuchat.log 2>&1

# Start a test server in the background, run the test suite against it,
# then stop it via the shutdown endpoint.
# NOTE(review): there is no wait between starting the server and running
# `cargo test`; the tests may race the server's bind — confirm or add a
# readiness wait.
test:
    cargo run --features shutdown -- --port 7001 > logs/nuchat.log 2>&1 &
    cargo test
    curl -s -X POST localhost:7001/admin/shutdown

default := 'run'

# Re-run the given recipe whenever something under src/ changes.
watch CMD=default:
    watchexec -w src -r just {{CMD}}

15
backend/readme.md Normal file
View File

@ -0,0 +1,15 @@
# Backend
Generate a random secret using the command below. This secret is used to authenticate callers on all `/admin` routes.
```bash
ADMIN_SECRET=$(openssl rand -base64 32)
```
## Features
### shutdown
This feature enables an endpoint that allows you to shut down the server.
**WARNING**: If the `ADMIN_SECRET` env var is not set then this endpoint is completely exposed and allows anyone to shutdown the server.

3
backend/src/lib.rs Normal file
View File

@ -0,0 +1,3 @@
//! nuchat backend library crate: exposes the HTTP application constructor
//! so the binary and integration tests can build the router via `nuchat::app`.
mod router;
pub use router::app;

View File

@ -1,3 +1,79 @@
fn main() {
println!("Hello, world!");
use std::sync::mpsc;
use clap::Parser;
use nuchat::app;
use tokio::net::TcpListener;
use tokio::signal;
use tracing::info;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
/// Command-line arguments for the server binary.
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    /// Port to run server on
    // u16 makes out-of-range ports (> 65535) a parse-time error with a clear
    // clap message, instead of a confusing failure later at TcpListener::bind.
    #[arg(long, default_value_t = 7000)]
    port: u16,
    /// Host to run server on
    #[arg(long, default_value = "127.0.0.1")]
    host: String,
}
#[tokio::main]
async fn main() {
    let args = Args::parse();
    // Initialise logging before anything else so bind/serve errors are
    // captured. Honours RUST_LOG when set; otherwise defaults to debug-level
    // output for this crate and tower_http.
    tracing_subscriber::registry()
        .with(
            tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|_| {
                format!("{}=debug,tower_http=debug", env!("CARGO_CRATE_NAME")).into()
            }),
        )
        .with(tracing_subscriber::fmt::layer().with_target(false))
        .init();
    // Bind on the requested host/port; panics with the underlying I/O error
    // if the address is unavailable.
    let listener = TcpListener::bind(format!("{}:{}", args.host, args.port))
        .await
        .unwrap();
    tracing::debug!("listening on {}", listener.local_addr().unwrap());
    // `rx` is the channel half that the admin shutdown endpoint signals on;
    // it feeds the graceful-shutdown future below.
    let (app, rx) = app();
    axum::serve(listener, app)
        .with_graceful_shutdown(shutdown_signal(rx))
        .await
        .unwrap();
    info!("Server stopped");
}
/// Waits for a shutdown signal on `rx`, returning the sent value, or
/// `Err(RecvError)` once the sending half has been dropped.
///
/// WARNING: `mpsc::Receiver::recv` blocks the calling thread; awaiting this
/// from an async context stalls the executor worker. Run it via
/// `tokio::task::spawn_blocking` if used from async code.
#[allow(clippy::unused_async)]
// `clippy::unused` is not a clippy lint (it triggered an unknown-lint
// warning); `dead_code` is what suppresses the "never used" warning here.
#[allow(dead_code)]
async fn await_shutdown(rx: mpsc::Receiver<bool>) -> Result<bool, mpsc::RecvError> {
    rx.recv()
}
/// Resolves when the server should shut down: on Ctrl+C, on SIGTERM (unix),
/// or when the admin shutdown endpoint sends a message on `rx`.
async fn shutdown_signal(rx: mpsc::Receiver<bool>) {
    let endpoint = async move {
        // `mpsc::Receiver::recv` blocks the calling thread, so it must run on
        // the blocking pool — running it inside `tokio::spawn` would pin down
        // an async worker thread for the lifetime of the server.
        match tokio::task::spawn_blocking(move || rx.recv()).await {
            // A value arrived: the shutdown endpoint was hit.
            Ok(Ok(_)) => {}
            // The sender was dropped (e.g. the `shutdown` feature is compiled
            // out) or the blocking task panicked. Previously this completed
            // the select! and shut the server down immediately at startup;
            // instead, never resolve this branch.
            _ => std::future::pending::<()>().await,
        }
    };

    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
        _ = endpoint => {},
    }

    info!("Shutting server down gracefully...");
}

83
backend/src/router.rs Normal file
View File

@ -0,0 +1,83 @@
mod healthcheck;
use std::sync::mpsc;
use std::time::Duration;
use axum::extract::Request;
use axum::middleware::{Next, from_fn};
use axum::response::Response;
use axum::routing::{get, post};
use axum::{Router, body::Body};
use http::StatusCode;
use tower::ServiceBuilder;
use tower_http::timeout::TimeoutLayer;
use tower_http::trace::TraceLayer;
use tracing::Level;
use uuid::Uuid;
/// Builds the application router and returns it together with the receiving
/// half of the shutdown channel; the sender is wired into the `/admin`
/// sub-router.
pub fn app() -> (Router, mpsc::Receiver<bool>) {
    let (tx, rx) = mpsc::channel();

    // Span every request with a fresh trace id plus method and URI.
    let trace_layer = TraceLayer::new_for_http().make_span_with(|req: &Request<Body>| {
        tracing::span!(
            Level::DEBUG,
            "request",
            trace_id = Uuid::now_v7().to_string(),
            method = format!("{}", req.method()),
            uri = format!("{}", req.uri()),
        )
    });

    // Tracing first, then a 10-second per-request timeout.
    let middleware = ServiceBuilder::new()
        .layer(trace_layer)
        .layer(TimeoutLayer::new(Duration::from_secs(10)));

    let router = Router::new()
        .route("/healthcheck", get(healthcheck::healthcheck))
        // Never-completing route, useful for exercising the timeout layer.
        .route("/forever", get(std::future::pending::<()>))
        .nest("/admin", admin(tx))
        .layer(middleware);

    (router, rx)
}
/// Builds the `/admin` sub-router: a `/test` probe, the optional shutdown
/// endpoint, and a middleware gate that checks the `Authorization` header
/// against the `ADMIN_SECRET` env var. If `ADMIN_SECRET` is unset, all admin
/// routes are left open (see the readme warning).
fn admin(tx: mpsc::Sender<bool>) -> Router {
    let r = Router::new().route("/test", get(async || StatusCode::OK));
    let r = add_shutdown_endpoint(r, tx);
    r.layer(from_fn(async |req: Request, next: Next| {
        if let Ok(secret) = std::env::var("ADMIN_SECRET") {
            // SECURITY: never log the secret — a previous version printed it
            // to stdout on every admin request.
            match req.headers().get("Authorization") {
                // Compare raw bytes; avoids cloning the HeaderValue.
                // NOTE(review): this is not a constant-time comparison —
                // consider a constant-time equality check for the secret.
                Some(key) if key.as_bytes() == secret.as_bytes() => (),
                _ => {
                    return Response::builder()
                        .status(StatusCode::UNAUTHORIZED)
                        .body(Body::empty())
                        .unwrap();
                }
            }
        }
        next.run(req).await
    }))
}
/// Registers `POST /shutdown` on the given router. Hitting it sends `true`
/// on the shutdown channel and reports whether the signal was delivered.
#[cfg(feature = "shutdown")]
fn add_shutdown_endpoint(r: Router, tx: mpsc::Sender<bool>) -> Router {
    let handler = async move || {
        // `send` only fails when the receiving half has been dropped.
        match tx.send(true) {
            Ok(()) => StatusCode::OK,
            Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
        }
    };
    r.route("/shutdown", post(handler))
}
/// Fallback when the `shutdown` feature is disabled: returns the router
/// unchanged. The sender is dropped here, so the paired receiver's `recv`
/// will return `Err(RecvError)`.
#[cfg(not(feature = "shutdown"))]
fn add_shutdown_endpoint(r: Router, _: mpsc::Sender<bool>) -> Router {
    r
}

View File

@ -0,0 +1,6 @@
use axum::response::Json;
use serde_json::{Value, json};
/// Liveness handler for `GET /healthcheck`.
///
/// Always reports healthy — reaching this handler at all means the server is
/// up and serving requests.
pub async fn healthcheck() -> Json<Value> {
    let body: Value = json!({"healthy": true});
    Json(body)
}

View File

@ -0,0 +1,26 @@
use std::{sync::LazyLock, time::Duration};
use http::StatusCode;
use reqwest::Client;
// Shared HTTP client for the integration tests, built once on first use.
// Statics must be SCREAMING_SNAKE_CASE — the lowercase `client` triggered
// the `non_upper_case_globals` warning.
static CLIENT: LazyLock<Client> = LazyLock::new(|| {
    Client::builder()
        .timeout(Duration::from_secs(10))
        .connect_timeout(Duration::from_secs(5))
        .build()
        .expect("Could not build reqwest client")
});

/// Integration test: expects a server already listening on localhost:7001
/// (started externally, e.g. by the Justfile `test` recipe).
#[tokio::test]
async fn test_healthcheck_returns_healthy() {
    let resp = CLIENT
        .get("http://localhost:7001/healthcheck")
        .send()
        .await
        .expect("Could not get server");
    assert_eq!(resp.status(), StatusCode::OK);
    let body = resp.text().await.expect("Could not get response text");
    assert_eq!(body, String::from(r#"{"healthy":true}"#));
}