Implement basic city portal logic (and severely deabstract)
- Create a podman compose for Postgres - Implement types and tables for urban data - No longer use a Cargo workspace as there's no point currently - Create query functionality (not mutation yet)
This commit is contained in:
parent
69b06ec43b
commit
e5ac7b13b7
14 changed files with 1704 additions and 150 deletions
1
.gitignore
vendored
1
.gitignore
vendored
|
|
@@ -1 +1,2 @@
|
|||
/target
|
||||
.env
|
||||
|
|
|
|||
1526
Cargo.lock
generated
1526
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
11
Cargo.toml
11
Cargo.toml
|
|
@@ -3,11 +3,14 @@ name = "city_portal"
|
|||
version = "0.1.0"
edition = "2024"

[dependencies]
tokio = { version = "1", features = ["full"] }
axum = "0.8"
async-graphql = "7"
async-graphql-axum = "7"
serde = "1"
serde_json = "1"
sqlx = { version = "0.8", features = ["runtime-tokio", "tls-rustls-aws-lc-rs", "postgres"] }
chrono = "0.4"
# NOTE(review): the dotenv crate is unmaintained; `dotenvy` is the drop-in
# successor — worth switching next time dependencies are touched.
dotenv = "0.15.0"
anyhow = "1.0"
|
|
|
|||
20
db/init.sql
Normal file
20
db/init.sql
Normal file
|
|
@@ -0,0 +1,20 @@
|
|||
-- Schema for the city portal's urban data store.
-- Applied once by the Postgres container entrypoint (docker-entrypoint-initdb.d).

-- A logical collection of records (e.g. one data source or topic).
CREATE TABLE datasets (
    id SERIAL PRIMARY KEY,
    name TEXT NOT NULL,
    description TEXT
);

-- A geographic point a record may be attached to.
CREATE TABLE locations (
    id SERIAL PRIMARY KEY,
    name TEXT,
    lat DOUBLE PRECISION NOT NULL,
    lon DOUBLE PRECISION NOT NULL
);

-- One observation: belongs to a dataset, optionally tied to a location.
-- The free-form payload lives in `data` (JSONB) so datasets can differ in shape.
CREATE TABLE records (
    id SERIAL PRIMARY KEY,
    dataset_id INTEGER NOT NULL REFERENCES datasets(id),
    location_id INTEGER REFERENCES locations(id),
    timestamp TIMESTAMPTZ NOT NULL,
    data JSONB
);

-- Postgres does not index foreign-key columns automatically; without these,
-- per-dataset / per-location queries scan the whole records table.
CREATE INDEX records_dataset_id_idx ON records (dataset_id);
CREATE INDEX records_location_id_idx ON records (location_id);
|
||||
16
podman-compose.yml
Normal file
16
podman-compose.yml
Normal file
|
|
@@ -0,0 +1,16 @@
|
|||
# Local development database. Start with: podman-compose up -d
services:
  postgres:
    # NOTE(review): unpinned tag — consider pinning a major version
    # (e.g. postgres:17-alpine) so a pull never silently jumps Postgres
    # versions against the existing data directory.
    image: docker.io/library/postgres:alpine
    container_name: postgres
    env_file:
      - .env # supplies POSTGRES_USER / POSTGRES_PASSWORD / POSTGRES_DB / POSTGRES_PORT
    ports:
      - "5432:5432"
    volumes:
      # The schema is applied by the image entrypoint on FIRST start only
      # (skipped when postgres_data already contains a cluster).
      - ./db/init.sql:/docker-entrypoint-initdb.d/init.sql:Z
      - postgres_data:/var/lib/postgresql/data:Z
    restart: unless-stopped
    # Lets dependents (and humans) know when the DB actually accepts connections.
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"]
      interval: 5s
      timeout: 3s
      retries: 10

volumes:
  postgres_data:
|
||||
|
|
@@ -1,10 +0,0 @@
|
|||
[package]
|
||||
name = "gateway"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
|
||||
[dependencies]
|
||||
tokio.workspace = true
|
||||
axum.workspace = true
|
||||
async-graphql.workspace = true
|
||||
async-graphql-axum.workspace = true
|
||||
|
|
@@ -1,48 +0,0 @@
|
|||
use async_graphql_axum::{GraphQLRequest, GraphQLResponse};
|
||||
use axum::{
|
||||
Router,
|
||||
routing::{get, post},
|
||||
};
|
||||
use std::net::SocketAddr;
|
||||
|
||||
mod models;
|
||||
mod resolvers;
|
||||
mod schema;
|
||||
|
||||
#[tokio::main]
|
||||
pub async fn start() {
|
||||
// TODO: Implement abstractions, types, and everything else from a portal_core crate
|
||||
// to have something that can eventually dynamically change and be grown.
|
||||
// This is a big project, and I'm a small student,
|
||||
// so the focus should be on modularity and proof that it can
|
||||
// be grown more than anything else.
|
||||
let schema = schema::build_schema();
|
||||
|
||||
let app = Router::new()
|
||||
.route(
|
||||
"/graphql",
|
||||
post({
|
||||
let schema = schema.clone();
|
||||
move |req: GraphQLRequest| async move {
|
||||
GraphQLResponse::from(schema.execute(req.into_inner()).await)
|
||||
}
|
||||
}),
|
||||
)
|
||||
.route(
|
||||
"/graphql",
|
||||
get({
|
||||
let schema = schema.clone();
|
||||
move || async move { axum::Json(schema.sdl()) }
|
||||
}),
|
||||
);
|
||||
|
||||
let addr = SocketAddr::from(([0, 0, 0, 0], 3000));
|
||||
println!("listening on http://{}", addr);
|
||||
|
||||
axum::serve(
|
||||
tokio::net::TcpListener::bind(addr).await.unwrap(),
|
||||
app.into_make_service(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
|
@@ -1,7 +0,0 @@
|
|||
use async_graphql::SimpleObject;
|
||||
|
||||
#[derive(SimpleObject)]
|
||||
pub struct User {
|
||||
pub id: i32,
|
||||
pub name: String,
|
||||
}
|
||||
|
|
@@ -1,22 +0,0 @@
|
|||
use crate::models::User;
|
||||
use async_graphql::Object;
|
||||
|
||||
pub struct QueryRoot;
|
||||
|
||||
#[Object]
|
||||
impl QueryRoot {
|
||||
async fn hello(&self) -> &str {
|
||||
"Hello, world!"
|
||||
}
|
||||
|
||||
async fn testing(&self) -> &str {
|
||||
"Testing"
|
||||
}
|
||||
|
||||
async fn user(&self, id: i32) -> User {
|
||||
User {
|
||||
id,
|
||||
name: String::from("Zeph"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,8 +0,0 @@
|
|||
use crate::resolvers::QueryRoot;
|
||||
use async_graphql::{EmptyMutation, EmptySubscription, Schema};
|
||||
|
||||
pub type AppSchema = Schema<QueryRoot, EmptyMutation, EmptySubscription>;
|
||||
|
||||
pub fn build_schema() -> AppSchema {
|
||||
Schema::build(QueryRoot, EmptyMutation, EmptySubscription).finish()
|
||||
}
|
||||
45
src/main.rs
45
src/main.rs
|
|
@@ -1,3 +1,44 @@
|
|||
fn main() {
|
||||
gateway::start();
|
||||
use async_graphql_axum::{GraphQLRequest, GraphQLResponse};
|
||||
use axum::{
|
||||
Router,
|
||||
routing::{get, post},
|
||||
};
|
||||
use std::net::SocketAddr;
|
||||
|
||||
mod models;
|
||||
mod resolvers;
|
||||
mod schema;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let schema = schema::build_schema().await?;
|
||||
|
||||
let app = Router::new()
|
||||
.route(
|
||||
"/graphql",
|
||||
post({
|
||||
let schema = schema.clone();
|
||||
move |req: GraphQLRequest| async move {
|
||||
GraphQLResponse::from(schema.execute(req.into_inner()).await)
|
||||
}
|
||||
}),
|
||||
)
|
||||
.route(
|
||||
"/graphql",
|
||||
get({
|
||||
let schema = schema.clone();
|
||||
move || async move { axum::Json(schema.sdl()) }
|
||||
}),
|
||||
);
|
||||
|
||||
let addr = SocketAddr::from(([0, 0, 0, 0], 3000));
|
||||
println!("listening on http://{}", addr);
|
||||
|
||||
axum::serve(
|
||||
tokio::net::TcpListener::bind(addr).await.unwrap(),
|
||||
app.into_make_service(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
|||
83
src/models.rs
Normal file
83
src/models.rs
Normal file
|
|
@@ -0,0 +1,83 @@
|
|||
use async_graphql::{ID, Json, SimpleObject};
|
||||
use serde_json::Value;
|
||||
|
||||
#[derive(SimpleObject)]
|
||||
pub struct Dataset {
|
||||
pub id: ID,
|
||||
pub name: String,
|
||||
pub description: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(sqlx::FromRow)]
|
||||
pub struct DatasetRow {
|
||||
pub id: i32,
|
||||
pub name: String,
|
||||
pub description: Option<String>,
|
||||
}
|
||||
|
||||
impl From<DatasetRow> for Dataset {
|
||||
fn from(row: DatasetRow) -> Self {
|
||||
Dataset {
|
||||
id: row.id.into(),
|
||||
name: row.name,
|
||||
description: row.description,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(SimpleObject)]
|
||||
pub struct Location {
|
||||
pub id: ID,
|
||||
pub name: Option<String>,
|
||||
pub lat: f64,
|
||||
pub lon: f64,
|
||||
}
|
||||
|
||||
#[derive(sqlx::FromRow)]
|
||||
pub struct LocationRow {
|
||||
pub id: i32,
|
||||
pub name: Option<String>,
|
||||
pub lat: f64,
|
||||
pub lon: f64,
|
||||
}
|
||||
|
||||
impl From<LocationRow> for Location {
|
||||
fn from(row: LocationRow) -> Self {
|
||||
Location {
|
||||
id: row.id.into(),
|
||||
name: row.name,
|
||||
lat: row.lat,
|
||||
lon: row.lon,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(SimpleObject)]
|
||||
pub struct Record {
|
||||
pub id: ID,
|
||||
pub dataset_id: ID,
|
||||
pub location_id: Option<ID>,
|
||||
pub timestamp: String,
|
||||
pub data: Option<Json<Value>>,
|
||||
}
|
||||
|
||||
#[derive(sqlx::FromRow)]
|
||||
pub struct RecordRow {
|
||||
pub id: i32,
|
||||
pub dataset_id: i32,
|
||||
pub location_id: Option<i32>,
|
||||
pub timestamp: chrono::DateTime<chrono::Utc>,
|
||||
pub data: Option<Value>,
|
||||
}
|
||||
|
||||
impl From<RecordRow> for Record {
|
||||
fn from(row: RecordRow) -> Self {
|
||||
Record {
|
||||
id: row.id.into(),
|
||||
dataset_id: row.dataset_id.into(),
|
||||
location_id: row.location_id.map(|id| id.into()),
|
||||
timestamp: row.timestamp.to_rfc3339(),
|
||||
data: row.data.map(Json),
|
||||
}
|
||||
}
|
||||
}
|
||||
25
src/resolvers.rs
Normal file
25
src/resolvers.rs
Normal file
|
|
@@ -0,0 +1,25 @@
|
|||
use crate::models::*;
|
||||
use async_graphql::{Context, Object};
|
||||
use sqlx::PgPool;
|
||||
|
||||
pub struct QueryRoot;
|
||||
|
||||
#[Object]
|
||||
impl QueryRoot {
|
||||
pub async fn datasets(&self, ctx: &Context<'_>) -> anyhow::Result<Vec<Dataset>> {
|
||||
let pool = ctx
|
||||
.data::<PgPool>()
|
||||
.expect("A database connection does not exist");
|
||||
|
||||
let rows: Vec<DatasetRow> = sqlx::query_as("SELECT * FROM datasets")
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
let result = rows.into_iter().map(|r| Dataset::from(r)).collect();
|
||||
return Ok(result);
|
||||
}
|
||||
|
||||
async fn query_dataset(&self, id: i32) -> anyhow::Result<Record> {
|
||||
unimplemented!("This doesn't work yet");
|
||||
}
|
||||
}
|
||||
32
src/schema.rs
Normal file
32
src/schema.rs
Normal file
|
|
@@ -0,0 +1,32 @@
|
|||
use crate::resolvers::QueryRoot;
|
||||
use async_graphql::{EmptyMutation, EmptySubscription, Schema};
|
||||
use sqlx::{PgPool, postgres::PgPoolOptions};
|
||||
use std::{env, time::Duration};
|
||||
use tokio::time::timeout;
|
||||
|
||||
pub type AppSchema = Schema<QueryRoot, EmptyMutation, EmptySubscription>;
|
||||
|
||||
pub async fn build_schema() -> anyhow::Result<AppSchema> {
|
||||
let pool = create_pool().await?;
|
||||
Ok(Schema::build(QueryRoot, EmptyMutation, EmptySubscription)
|
||||
.data(pool)
|
||||
.finish())
|
||||
}
|
||||
|
||||
async fn create_pool() -> anyhow::Result<PgPool> {
|
||||
dotenv::dotenv()?;
|
||||
let user = env::var("POSTGRES_USER")?;
|
||||
let password = env::var("POSTGRES_PASSWORD")?;
|
||||
let db = env::var("POSTGRES_DB")?;
|
||||
let port = env::var("POSTGRES_PORT")?;
|
||||
|
||||
let url = format!("postgres://{user}:{password}@localhost:{port}/{db}");
|
||||
|
||||
let pool = timeout(
|
||||
Duration::from_secs(5),
|
||||
PgPoolOptions::new().max_connections(10).connect(&url),
|
||||
)
|
||||
.await??;
|
||||
|
||||
Ok(pool)
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue