Prepare for an eventual frontend

Move the backend code into a backend/ directory, and create a frontend/
directory containing an empty sveltekit project.
This commit is contained in:
Zeph Levy 2025-12-16 17:20:31 +01:00
parent d65efebb44
commit df7d4f3507
25 changed files with 461 additions and 2 deletions

50
backend/src/main.rs Normal file
View file

@@ -0,0 +1,50 @@
use async_graphql::http::GraphiQLSource;
use async_graphql_axum::{GraphQLRequest, GraphQLResponse};
use axum::{
Router,
response::Html,
routing::{get, post},
};
use std::net::SocketAddr;
mod models;
mod resolvers;
mod schema;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Build the GraphQL schema; this also creates the database pool, so a
    // misconfigured database fails here, before we start listening.
    let schema = schema::build_schema().await?;

    let app = Router::new()
        .route(
            "/graphql",
            // POST /graphql executes GraphQL operations.
            post({
                let schema = schema.clone();
                move |req: GraphQLRequest| async move {
                    GraphQLResponse::from(schema.execute(req.into_inner()).await)
                }
            }),
        )
        .route(
            "/graphql",
            // GET /graphql serves the schema SDL (axum merges method routers
            // registered on the same path).
            get({
                let schema = schema.clone();
                move || async move { axum::Json(schema.sdl()) }
            }),
        )
        .route(
            "/graphiql",
            // Static GraphiQL playground pointed at the endpoint above.
            get(Html(GraphiQLSource::build().endpoint("/graphql").finish())),
        );

    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    println!("listening on http://{}", addr);

    // `main` already returns `anyhow::Result`, so propagate bind/serve
    // failures with `?` instead of panicking via `unwrap()`.
    let listener = tokio::net::TcpListener::bind(addr).await?;
    axum::serve(listener, app.into_make_service()).await?;
    Ok(())
}

83
backend/src/models.rs Normal file
View file

@@ -0,0 +1,83 @@
use async_graphql::{ID, Json, SimpleObject};
use serde_json::Value;
#[derive(SimpleObject)]
pub struct Dataset {
pub id: ID,
pub name: String,
pub description: Option<String>,
}
#[derive(sqlx::FromRow)]
pub struct DatasetRow {
pub id: i32,
pub name: String,
pub description: Option<String>,
}
impl From<DatasetRow> for Dataset {
fn from(row: DatasetRow) -> Self {
Dataset {
id: row.id.into(),
name: row.name,
description: row.description,
}
}
}
#[derive(SimpleObject)]
pub struct Location {
pub id: ID,
pub name: Option<String>,
pub lat: f64,
pub lon: f64,
}
#[derive(sqlx::FromRow)]
pub struct LocationRow {
pub id: i32,
pub name: Option<String>,
pub lat: f64,
pub lon: f64,
}
impl From<LocationRow> for Location {
fn from(row: LocationRow) -> Self {
Location {
id: row.id.into(),
name: row.name,
lat: row.lat,
lon: row.lon,
}
}
}
#[derive(SimpleObject)]
pub struct Record {
pub id: ID,
pub dataset_id: ID,
pub location_id: Option<ID>,
pub timestamp: String,
pub data: Option<Json<Value>>,
}
#[derive(sqlx::FromRow)]
pub struct RecordRow {
pub id: i32,
pub dataset_id: i32,
pub location_id: Option<i32>,
pub timestamp: chrono::DateTime<chrono::Utc>,
pub data: Option<Value>,
}
impl From<RecordRow> for Record {
fn from(row: RecordRow) -> Self {
Record {
id: row.id.into(),
dataset_id: row.dataset_id.into(),
location_id: row.location_id.map(|id| id.into()),
timestamp: row.timestamp.to_rfc3339(),
data: row.data.map(Json),
}
}
}

132
backend/src/resolvers.rs Normal file
View file

@@ -0,0 +1,132 @@
use crate::models::*;
use async_graphql::{Context, Object};
use sqlx::PgPool;
// Root of all GraphQL queries.
pub struct QueryRoot;

// Fetch the shared Postgres pool from the GraphQL context. A missing pool
// means the schema was built without `.data(pool)` — a programming error,
// so panicking via `expect` is acceptable here.
fn get_pg_pool<'a>(ctx: &'a Context<'_>) -> &'a PgPool {
    let pool: &PgPool = ctx
        .data()
        .expect("A database connection does not exist");
    pool
}
#[Object]
impl QueryRoot {
pub async fn datasets(&self, ctx: &Context<'_>) -> anyhow::Result<Vec<Dataset>> {
let pool = get_pg_pool(ctx);
let rows: Vec<DatasetRow> = sqlx::query_as("SELECT * FROM datasets")
.fetch_all(pool)
.await?;
Ok(rows.into_iter().map(Dataset::from).collect())
}
pub async fn query_dataset(
&self,
ctx: &Context<'_>,
id: i32,
) -> anyhow::Result<Option<Record>> {
let pool = get_pg_pool(ctx);
let row: Option<RecordRow> = sqlx::query_as(
r#"SELECT * FROM records
WHERE dataset_id = $1
ORDER BY timestamp DESC
LIMIT 1;"#,
)
.bind(id)
.fetch_optional(pool)
.await?;
Ok(row.map(Into::into))
}
pub async fn get_location(&self, ctx: &Context<'_>, id: i32) -> anyhow::Result<Location> {
let pool = get_pg_pool(ctx);
let row: LocationRow = sqlx::query_as(
r#"SELECT * FROM locations
WHERE id = $1
LIMIT 1;"#,
)
.bind(id)
.fetch_one(pool)
.await?;
Ok(row.into())
}
}
// Root of all GraphQL mutations.
pub struct MutationRoot;

#[Object]
impl MutationRoot {
    // Insert a new dataset row and return its generated id.
    pub async fn create_dataset(
        &self,
        ctx: &Context<'_>,
        name: String,
        description: Option<String>,
    ) -> anyhow::Result<i32> {
        let pool = get_pg_pool(ctx);
        let new_id = sqlx::query_scalar::<_, i32>(
            r#"INSERT INTO datasets (name, description)
VALUES ($1, $2)
RETURNING id;"#,
        )
        .bind(name)
        .bind(description)
        .fetch_one(pool)
        .await?;
        Ok(new_id)
    }

    // Insert a new location row and return its generated id.
    pub async fn create_location(
        &self,
        ctx: &Context<'_>,
        name: String,
        lat: f64,
        lon: f64,
    ) -> anyhow::Result<i32> {
        let pool = get_pg_pool(ctx);
        let new_id = sqlx::query_scalar::<_, i32>(
            r#"INSERT INTO locations (name, lat, lon)
VALUES ($1, $2)
RETURNING id;"#,
        )
        .bind(name)
        .bind(lat)
        .bind(lon)
        .fetch_one(pool)
        .await?;
        Ok(new_id)
    }

    // Append a record to a dataset and echo the stored row back.
    // NOTE(review): the timestamp column is not supplied here — presumably
    // the table has a DEFAULT; confirm against the migration.
    pub async fn record_into_dataset(
        &self,
        ctx: &Context<'_>,
        dataset_id: i32,
        location_id: Option<i32>,
        data: serde_json::Value,
    ) -> anyhow::Result<Record> {
        let pool = get_pg_pool(ctx);
        let inserted = sqlx::query_as::<_, RecordRow>(
            r#"
INSERT INTO records (dataset_id, location_id, data)
VALUES ($1, $2, $3)
RETURNING *;
"#,
        )
        .bind(dataset_id)
        .bind(location_id)
        .bind(data)
        .fetch_one(pool)
        .await?;
        Ok(inserted.into())
    }
}

32
backend/src/schema.rs Normal file
View file

@@ -0,0 +1,32 @@
use crate::resolvers::{MutationRoot, QueryRoot};
use async_graphql::{EmptySubscription, Schema};
use sqlx::{PgPool, postgres::PgPoolOptions};
use std::{env, time::Duration};
use tokio::time::timeout;
/// Concrete schema type for this application (no subscriptions).
pub type AppSchema = Schema<QueryRoot, MutationRoot, EmptySubscription>;

/// Build the application schema with a live database pool attached as
/// context data, so resolvers can fetch it via `ctx.data::<PgPool>()`.
pub async fn build_schema() -> anyhow::Result<AppSchema> {
    let pool = create_pool().await?;
    let schema = Schema::build(QueryRoot, MutationRoot, EmptySubscription)
        .data(pool)
        .finish();
    Ok(schema)
}
async fn create_pool() -> anyhow::Result<PgPool> {
dotenv::dotenv()?;
let user = env::var("POSTGRES_USER")?;
let password = env::var("POSTGRES_PASSWORD")?;
let db = env::var("POSTGRES_DB")?;
let port = env::var("POSTGRES_PORT")?;
let url = format!("postgres://{user}:{password}@localhost:{port}/{db}");
let pool = timeout(
Duration::from_secs(5),
PgPoolOptions::new().max_connections(10).connect(&url),
)
.await??;
Ok(pool)
}