gitdataai/apps/app/src/main.rs
2026-04-14 19:02:01 +08:00

211 lines
6.6 KiB
Rust

use actix_cors::Cors;
use actix_web::cookie::time::Duration;
use actix_web::middleware::Logger;
use actix_web::{App, HttpResponse, HttpServer, cookie::Key, web};
use clap::Parser;
use db::cache::AppCache;
use db::database::AppDatabase;
use sea_orm::ConnectionTrait;
use service::AppService;
use session::SessionMiddleware;
use session::config::{PersistentSession, SessionLifecycle, TtlExtensionPolicy};
use session::storage::RedisClusterSessionStore;
use slog::Drain;
mod args;
mod logging;
use args::ServerArgs;
use config::AppConfig;
use migrate::{Migrator, MigratorTrait};
/// Per-worker application state injected into handlers via
/// `web::Data<AppState>` (see registration in `main`).
///
/// Cheap to clone: both handles derive `Clone` and are cloned once per
/// actix worker in the `HttpServer::new` factory closure.
#[derive(Clone)]
pub struct AppState {
    // Database handle; also registered separately as `web::Data<AppDatabase>`.
    pub db: AppDatabase,
    // Cache handle; also registered separately as `web::Data<AppCache>`.
    pub cache: AppCache,
}
/// Builds the root `slog` logger, writing formatted records to stderr.
///
/// `level` is one of `"trace" | "debug" | "info" | "warn" | "error"`;
/// any other value falls back to `"info"`. Records below the configured
/// severity are silently dropped.
fn build_slog_logger(level: &str) -> slog::Logger {
    // Numeric severity rank: lower = more verbose. Unknown strings
    // default to "info" (rank 2).
    let level_filter = match level {
        "trace" => 0usize,
        "debug" => 1usize,
        "info" => 2usize,
        "warn" => 3usize,
        "error" => 4usize,
        _ => 2usize,
    };
    // Minimal drain: rank-filters records and prints one line per record
    // to stderr. The inner usize is the minimum rank to emit.
    struct StderrDrain(usize);
    impl Drain for StderrDrain {
        type Ok = ();
        type Err = ();
        #[inline]
        fn log(&self, record: &slog::Record, _logger: &slog::OwnedKVList) -> Result<(), ()> {
            // Map slog's level enum onto the same rank scale used above.
            let slog_level = match record.level() {
                slog::Level::Trace => 0,
                slog::Level::Debug => 1,
                slog::Level::Info => 2,
                slog::Level::Warning => 3,
                slog::Level::Error => 4,
                slog::Level::Critical => 5,
            };
            if slog_level < self.0 {
                return Ok(());
            }
            // `eprintln!` returns `()`; the original bound it with
            // `let _ =` for no effect. Also print `record.level()`
            // directly instead of `.to_string()` — Display is used by
            // the format machinery anyway (clippy: to_string_in_format_args).
            eprintln!(
                "{} [{}] {}:{} - {}",
                chrono::Utc::now().format("%Y-%m-%dT%H:%M:%S%.3fZ"),
                record.level(),
                // Print only the file's basename. NOTE(review): splits on
                // '/' only, so Windows-style paths print in full.
                record
                    .file()
                    .rsplit_once('/')
                    .map(|(_, s)| s)
                    .unwrap_or(record.file()),
                record.line(),
                record.msg(),
            );
            Ok(())
        }
    }
    let drain = StderrDrain(level_filter);
    // Mutex makes the drain usable from multiple threads; Fuse upgrades
    // drain errors to panics so the root logger's Err type is Never.
    let drain = std::sync::Mutex::new(drain);
    let drain = slog::Fuse::new(drain);
    slog::Logger::root(drain, slog::o!())
}
/// Builds the cookie-signing/encryption key for the session middleware.
///
/// When `APP_SESSION_SECRET` is set, the key is derived deterministically
/// from it so sessions survive restarts and are shared across instances.
/// The secret's bytes are cycled up to the 64 bytes `Key::from` requires.
/// NOTE(review): cycling a short secret weakens the key material — prefer
/// a secret of at least 64 random bytes in production.
///
/// # Errors
/// Returns an error when the secret is present but empty: cycling an
/// empty byte sequence yields zero bytes, and `Key::from` panics on
/// fewer than 64 bytes — previously this crashed the process at startup.
fn build_session_key(cfg: &AppConfig) -> anyhow::Result<Key> {
    if let Some(secret) = cfg.env.get("APP_SESSION_SECRET") {
        if secret.is_empty() {
            anyhow::bail!("APP_SESSION_SECRET is set but empty");
        }
        let bytes: Vec<u8> = secret.as_bytes().iter().cycle().take(64).copied().collect();
        return Ok(Key::from(&bytes));
    }
    // No secret configured: generate an ephemeral key. Sessions will not
    // survive a restart and cannot be shared across instances.
    Ok(Key::generate())
}
/// Application entry point: wires config, logging, database, Redis
/// sessions, cache, migrations, background room workers, and the HTTP
/// server, then performs an orderly shutdown of the workers.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Parse CLI args before any I/O so `--help`/`--version`/bad flags
    // exit immediately without requiring a live database or Redis.
    let args = ServerArgs::parse();
    let cfg = AppConfig::load();
    let log_level = cfg.log_level().unwrap_or_else(|_| "info".to_string());
    let log = build_slog_logger(&log_level);
    slog::info!(
        log,
        "Starting {} {}",
        cfg.app_name().unwrap_or_default(),
        cfg.app_version().unwrap_or_default()
    );
    let db = AppDatabase::init(&cfg).await?;
    slog::info!(log, "Database connected");
    let redis_urls = cfg.redis_urls()?;
    let store: RedisClusterSessionStore = RedisClusterSessionStore::new(redis_urls).await?;
    slog::info!(log, "Redis connected");
    let cache = AppCache::init(&cfg).await?;
    slog::info!(log, "Cache initialized");
    run_migrations(&db, &log).await?;
    let session_key = build_session_key(&cfg)?;
    let service = AppService::new(cfg.clone()).await?;
    slog::info!(log, "AppService initialized");
    // Broadcast channel used purely as a shutdown signal for the room
    // worker task spawned below.
    let (shutdown_tx, shutdown_rx) = tokio::sync::broadcast::channel::<()>(1);
    let worker_service = service.clone();
    let log_for_http = log.clone();
    let log_for_worker = log.clone();
    let worker_handle = tokio::spawn(async move {
        worker_service
            .start_room_workers(shutdown_rx, log_for_worker)
            .await
    });
    let bind_addr = args.bind.unwrap_or_else(|| "127.0.0.1:8080".to_string());
    slog::info!(log, "Listening on {}", bind_addr);
    // Capture the server result instead of `?`-returning immediately:
    // previously a bind/run error skipped the shutdown signal and join,
    // abandoning the spawned room-worker task on the error path.
    let server_result = match HttpServer::new(move || {
        // NOTE(review): allow_any_origin + supports_credentials echoes
        // the request origin with credentials allowed — confirm this CORS
        // policy is intended for production.
        let cors = Cors::default()
            .allow_any_origin()
            .allow_any_method()
            .allow_any_header()
            .supports_credentials()
            .max_age(3600);
        // Redis-backed cookie session ("id"), 30-day sliding expiry:
        // the TTL is renewed on every request.
        // NOTE(review): cookie_secure(false) sends the cookie over plain
        // HTTP — confirm TLS termination happens upstream or enable it.
        let session_mw = SessionMiddleware::builder(store.clone(), session_key.clone())
            .cookie_name("id".to_string())
            .cookie_path("/".to_string())
            .cookie_secure(false)
            .cookie_http_only(true)
            .session_lifecycle(SessionLifecycle::PersistentSession(
                PersistentSession::default()
                    .session_ttl(Duration::days(30))
                    .session_ttl_extension_policy(TtlExtensionPolicy::OnEveryRequest),
            ))
            .build();
        App::new()
            .wrap(cors)
            .wrap(session_mw)
            // Skip access-logging the health probe to keep logs quiet.
            .wrap(Logger::default().exclude("/health"))
            .app_data(web::Data::new(AppState {
                db: db.clone(),
                cache: cache.clone(),
            }))
            .app_data(web::Data::new(service.clone()))
            .app_data(web::Data::new(cfg.clone()))
            .app_data(web::Data::new(db.clone()))
            .app_data(web::Data::new(cache.clone()))
            .wrap(logging::RequestLogger::new(log_for_http.clone()))
            .route("/health", web::get().to(health_check))
            .configure(api::route::init_routes)
    })
    .bind(&bind_addr)
    {
        Ok(server) => server.run().await,
        Err(e) => Err(e),
    };
    slog::info!(log, "Server stopped, shutting down room workers");
    // Workers stop on signal; send can only fail if they already exited.
    let _ = shutdown_tx.send(());
    let _ = worker_handle.await;
    slog::info!(log, "Room workers stopped");
    // Propagate any server error only after the workers are down.
    server_result?;
    Ok(())
}
/// Applies all pending migrations against the writer connection.
///
/// # Errors
/// Wraps any migrator failure in an `anyhow` error carrying the
/// underlying error's debug representation.
async fn run_migrations(db: &AppDatabase, log: &slog::Logger) -> anyhow::Result<()> {
    slog::info!(log, "Running database migrations...");
    if let Err(e) = Migrator::up(db.writer(), None).await {
        return Err(anyhow::anyhow!("Migration failed: {:?}", e));
    }
    slog::info!(log, "Migrations completed");
    Ok(())
}
/// Health probe handler: pings the database and the cache and reports
/// per-component status. Returns 200 when both are reachable, 503
/// otherwise.
async fn health_check(state: web::Data<AppState>) -> HttpResponse {
    let db_ok = db_ping(&state.db).await;
    let cache_ok = cache_ping(&state.cache).await;
    // Map a boolean check onto the status label used in the payload.
    let label = |ok: bool| if ok { "ok" } else { "error" };
    if db_ok && cache_ok {
        HttpResponse::Ok().json(serde_json::json!({
            "status": "ok",
            "db": "ok",
            "cache": "ok",
        }))
    } else {
        HttpResponse::ServiceUnavailable().json(serde_json::json!({
            "status": "unhealthy",
            "db": label(db_ok),
            "cache": label(cache_ok),
        }))
    }
}
/// Returns `true` when a trivial `SELECT 1` succeeds against the
/// database, `false` on any error.
async fn db_ping(db: &AppDatabase) -> bool {
    let probe = sea_orm::Statement::from_string(sea_orm::DbBackend::Postgres, "SELECT 1");
    db.query_one_raw(probe).await.is_ok()
}
/// Returns `true` when a cache connection can be obtained, `false` on
/// any error.
async fn cache_ping(cache: &AppCache) -> bool {
    matches!(cache.conn().await, Ok(_))
}