gitdataai/libs/git/hook/pool/redis.rs
ZhenYi 8fb2436f22 feat(git): add Redis-backed hook worker with per-repo distributed locking
- pool/worker.rs: single-threaded consumer that dequeues from Redis queues
  sequentially via BLMOVE. K8s replicas provide HA — each pod runs one worker.
- pool/redis.rs: RedisConsumer with BLMOVE atomic dequeue, ACK/NAK, and
  retry-with-json support.
- pool/types.rs: HookTask, TaskType, PoolConfig (minimal — no pool metrics).
- sync/lock.rs: Redis SET NX EX per-repo lock to prevent concurrent workers
  from processing the same repo. Lock conflicts are handled by requeueing
  without incrementing retry count.
- hook/mod.rs: HookService.start_worker() spawns the background worker.
- ssh/mod.rs / http/mod.rs: ReceiveSyncService RPUSHes to Redis queue.
  Both run_http and run_ssh call start_worker() to launch the consumer.
- Lock conflicts (GitError::Locked) in the worker are requeued without
  incrementing retry_count so another worker can pick them up.
2026-04-17 12:33:58 +08:00

168 lines
5.2 KiB
Rust

use crate::error::GitError;
use crate::hook::pool::types::HookTask;
use deadpool_redis::cluster::Connection as RedisConn;
use slog::Logger;
/// Redis List consumer using BLMOVE for atomic move-from-queue-to-work pattern.
pub struct RedisConsumer {
/// Cluster connection pool; a connection is checked out per operation.
pool: deadpool_redis::cluster::Pool,
/// Hash-tag-prefixed key prefix, e.g. "{hook}".
prefix: String,
/// Maximum seconds a BLMOVE dequeue blocks waiting for a task.
block_timeout_secs: u64,
/// Structured logger for dequeue / discard / requeue events.
logger: Logger,
}
impl RedisConsumer {
    /// Creates a consumer over `pool` reading keys under the hash-tag-prefixed
    /// `prefix` (e.g. "{hook}"), blocking up to `block_timeout_secs` on each
    /// dequeue attempt.
    pub fn new(
        pool: deadpool_redis::cluster::Pool,
        prefix: String,
        block_timeout_secs: u64,
        logger: Logger,
    ) -> Self {
        Self {
            pool,
            prefix,
            block_timeout_secs,
            logger,
        }
    }

    /// Checks out a connection from the cluster pool, mapping pool errors
    /// into `GitError::Internal`. Shared by all commands below.
    async fn conn(&self) -> Result<RedisConn, GitError> {
        self.pool
            .get()
            .await
            .map_err(|e| GitError::Internal(format!("redis pool get failed: {}", e)))
    }

    /// Atomically moves a task from the main queue to the work queue using BLMOVE.
    /// Blocks up to `block_timeout_secs` waiting for a task.
    ///
    /// Returns `Some((HookTask, task_json))` where `task_json` is the raw JSON string
    /// needed for LREM on ACK. Returns `None` if the blocking timed out or a
    /// malformed task was discarded.
    pub async fn next(&self, task_type: &str) -> Result<Option<(HookTask, String)>, GitError> {
        let queue_key = format!("{}:{}", self.prefix, task_type);
        let work_key = format!("{}:{}:work", self.prefix, task_type);
        let mut conn = self.conn().await?;
        // BLMOVE source destination <LEFT|RIGHT> <LEFT|RIGHT> timeout
        // RIGHT -> LEFT keeps FIFO order against LPUSH producers.
        let task_json: Option<String> = redis::cmd("BLMOVE")
            .arg(&queue_key)
            .arg(&work_key)
            .arg("RIGHT")
            .arg("LEFT")
            .arg(self.block_timeout_secs)
            .query_async(&mut conn)
            .await
            .map_err(|e| GitError::Internal(format!("BLMOVE failed: {}", e)))?;
        match task_json {
            Some(json) => {
                match serde_json::from_str::<HookTask>(&json) {
                    Ok(task) => {
                        slog::debug!(self.logger, "task dequeued";
                            "task_id" => %task.id,
                            "task_type" => %task.task_type,
                            "queue" => %queue_key
                        );
                        Ok(Some((task, json)))
                    }
                    Err(e) => {
                        // Malformed task — remove from work queue and discard.
                        // Best-effort: a failed LREM only leaves a dead entry
                        // in the work queue, so the error is ignored.
                        slog::warn!(self.logger, "malformed task JSON, discarding";
                            "error" => %e,
                            "queue" => %work_key
                        );
                        let _ = self.ack_raw(&work_key, &json).await;
                        Ok(None)
                    }
                }
            }
            None => {
                // Timed out, no task available.
                Ok(None)
            }
        }
    }

    /// Acknowledge a task: remove it from the work queue (LREM).
    pub async fn ack(&self, work_key: &str, task_json: &str) -> Result<(), GitError> {
        self.ack_raw(work_key, task_json).await
    }

    /// Removes one occurrence of `task_json` from `work_key`, scanning from
    /// the tail (LREM count -1). Duplicated payloads are byte-identical, so
    /// removing any one occurrence is equivalent.
    async fn ack_raw(&self, work_key: &str, task_json: &str) -> Result<(), GitError> {
        let mut conn = self.conn().await?;
        let _: i64 = redis::cmd("LREM")
            .arg(work_key)
            .arg(-1)
            .arg(task_json)
            .query_async(&mut conn)
            .await
            .map_err(|e| GitError::Internal(format!("LREM failed: {}", e)))?;
        Ok(())
    }

    /// Negative acknowledge (retry): remove from work queue and push back to main queue.
    pub async fn nak(
        &self,
        work_key: &str,
        queue_key: &str,
        task_json: &str,
    ) -> Result<(), GitError> {
        self.nak_with_retry(work_key, queue_key, task_json, task_json).await
    }

    /// Negative acknowledge with a different (updated) task JSON — used to
    /// requeue with an incremented retry_count.
    ///
    /// Requeues BEFORE removing from the work queue: if the process dies or
    /// the second command fails, the task may be processed twice, but it is
    /// never lost (at-least-once delivery). The reverse order would drop the
    /// task entirely whenever the LPUSH failed after a successful LREM.
    pub async fn nak_with_retry(
        &self,
        work_key: &str,
        queue_key: &str,
        old_task_json: &str,
        new_task_json: &str,
    ) -> Result<(), GitError> {
        let mut conn = self.conn().await?;
        let _: i64 = redis::cmd("LPUSH")
            .arg(queue_key)
            .arg(new_task_json)
            .query_async(&mut conn)
            .await
            .map_err(|e| GitError::Internal(format!("LPUSH retry failed: {}", e)))?;
        self.ack_raw(work_key, old_task_json).await?;
        // Structured kv form for consistency with the other log sites above.
        slog::warn!(self.logger, "task nack'd and requeued";
            "queue" => %queue_key
        );
        Ok(())
    }

    /// Returns the configured key prefix (e.g. "{hook}").
    pub fn prefix(&self) -> &str {
        &self.prefix
    }
}
impl Clone for RedisConsumer {
fn clone(&self) -> Self {
Self {
pool: self.pool.clone(),
prefix: self.prefix.clone(),
block_timeout_secs: self.block_timeout_secs,
logger: self.logger.clone(),
}
}
}