gitdataai/libs/git/hook/mod.rs
ZhenYi ef61b193c4 fix(git/hook): refine Redis queue worker, remove dead code, fix warnings
- pool/mod.rs: pass shared http_client Arc to HookWorker
- worker.rs: remove double-locking (sync() manages its own lock),
  await all webhook handles before returning, share http_client,
  hoist namespace query out of loop
- redis.rs: atomic NAK via Lua script (LREM + LPUSH in one eval)
- sync/lock.rs: increase LOCK_TTL from 60s to 300s for large repos
- sync/mod.rs: split sync/sync_work, fsck_only/fsck_work, gc_only/gc_work
  so callers can choose locked vs lock-free path; run_gc + sync_skills
  outside the DB transaction
- hook/mod.rs: remove unused http field from HookService
- ssh/mod.rs, http/mod.rs: remove unused HookService/http imports
2026-04-17 13:05:07 +08:00

56 lines
1.4 KiB
Rust

use config::AppConfig;
use db::cache::AppCache;
use db::database::AppDatabase;
use deadpool_redis::cluster::Pool as RedisPool;
use slog::Logger;
use tokio_util::sync::CancellationToken;
// Submodules: worker pool / task queue, repo sync machinery, webhook dispatch.
pub mod pool;
pub mod sync;
pub mod webhook_dispatch;
// Re-export the pool's public surface so callers can reach the worker types
// and task definitions directly via this `hook` module.
pub use pool::{HookWorker, PoolConfig, RedisConsumer};
pub use pool::types::{HookTask, TaskType};
/// Hook service that manages the Redis-backed task queue worker.
/// Multiple gitserver pods can run concurrently — the worker acquires a
/// per-repo Redis lock before processing each task.
///
/// All fields are `pub(crate)` handles that are cloned into the background
/// worker when [`HookService::start_worker`] is called; `Clone` is derived
/// because every field is itself a cheap handle.
#[derive(Clone)]
pub struct HookService {
// Database handle used by the worker for task processing.
pub(crate) db: AppDatabase,
// Application cache handle shared with the worker.
pub(crate) cache: AppCache,
// Redis cluster pool backing the task queue and per-repo locks.
pub(crate) redis_pool: RedisPool,
// Structured logger passed through to the worker.
pub(crate) logger: Logger,
// App configuration; `start_worker` derives `PoolConfig` from it.
pub(crate) config: AppConfig,
}
impl HookService {
    /// Build a new `HookService` from its dependency handles.
    ///
    /// This performs no work: the background worker only starts when
    /// [`HookService::start_worker`] is invoked.
    pub fn new(
        db: AppDatabase,
        cache: AppCache,
        redis_pool: RedisPool,
        logger: Logger,
        config: AppConfig,
    ) -> Self {
        Self { db, cache, redis_pool, logger, config }
    }

    /// Start the background worker and return a cancellation token.
    ///
    /// Worker settings are derived from the environment via
    /// [`PoolConfig::from_env`]. Cancelling the returned token stops the
    /// worker; each handle field is cloned into the spawned task.
    pub fn start_worker(&self) -> CancellationToken {
        pool::start_worker(
            self.db.clone(),
            self.cache.clone(),
            self.redis_pool.clone(),
            self.logger.clone(),
            PoolConfig::from_env(&self.config),
        )
    }
}