gitdataai/libs/config/hook.rs
ZhenYi 1fed9fc8ab fix(git/hook): address review findings — fs blocking, redis timeout, backoff, slog
- sync/mod.rs: wrap scan_skills_from_dir in spawn_blocking to avoid
  blocking the async executor; use to_path_buf() to get owned PathBuf
- pool/worker.rs: replace the 500ms poll sleep with a cancellation-token
  is_cancelled() check (eliminates artificial latency); add exponential
  backoff on Redis errors (1s base, 32s cap, reset on success)
- pool/redis.rs: add 5s timeout on pool.get() for all three methods
  (next, ack_raw, nak_with_retry) to prevent indefinite blocking on
  unresponsive Redis
- sync/gc.rs: add comment explaining why git gc --auto non-zero exit
  is benign
- webhook_dispatch.rs: remove unnecessary format! wrappers in slog macros
- config/hook.rs: document max_concurrent intent (K8s operator/HPA, not
  the single-threaded worker itself)
2026-04-17 13:20:31 +08:00

91 lines
2.8 KiB
Rust

use crate::AppConfig;
use serde::{Deserialize, Serialize};
/// Configuration for the hook worker pool, populated from environment
/// variables (see `PoolConfig::from_env`) or `Default`.
///
/// Serialize/Deserialize are derived, so field names are part of the
/// serialized wire/config format — renaming a field is a breaking change.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PoolConfig {
/// Intended concurrency (used by K8s operator/HPA, not the worker itself).
/// The worker is single-threaded by design; K8s replicas provide parallelism.
pub max_concurrent: usize,
/// CPU usage threshold in percent (default 80.0); consumer semantics are
/// defined by the worker/operator, not enforced here.
pub cpu_threshold: f32,
/// Hash-tag-prefixed Redis key prefix for hook task queues.
/// Example: "{hook}" — full keys will be "{hook}:sync", "{hook}:sync:work", etc.
pub redis_list_prefix: String,
/// Redis channel for task logs (PubSub).
pub redis_log_channel: String,
/// BLMOVE blocking timeout in seconds (0 = infinite).
pub redis_block_timeout_secs: u64,
/// Max retry attempts before discarding a failed task.
pub redis_max_retries: usize,
/// Stable identifier for this worker instance; a random UUIDv4 is
/// generated when not supplied via HOOK_POOL_WORKER_ID.
pub worker_id: String,
}
impl PoolConfig {
    /// Builds a `PoolConfig` from `config.env`, falling back to the same
    /// defaults as [`Default`] for any variable that is absent or fails to
    /// parse (malformed values are silently ignored, by design).
    ///
    /// Recognized variables: `HOOK_POOL_MAX_CONCURRENT`,
    /// `HOOK_POOL_CPU_THRESHOLD`, `HOOK_POOL_REDIS_LIST_PREFIX`,
    /// `HOOK_POOL_REDIS_LOG_CHANNEL`, `HOOK_POOL_REDIS_BLOCK_TIMEOUT`,
    /// `HOOK_POOL_REDIS_MAX_RETRIES`, `HOOK_POOL_WORKER_ID`.
    pub fn from_env(config: &AppConfig) -> Self {
        // Parse an env var into any FromStr type; None on absence or bad value.
        fn parsed<T: std::str::FromStr>(config: &AppConfig, key: &str) -> Option<T> {
            config.env.get(key).and_then(|v| v.parse().ok())
        }
        // Fetch a string-valued env var verbatim.
        fn raw(config: &AppConfig, key: &str) -> Option<String> {
            config.env.get(key).cloned()
        }
        Self {
            max_concurrent: parsed(config, "HOOK_POOL_MAX_CONCURRENT")
                .unwrap_or_else(num_cpus::get),
            cpu_threshold: parsed(config, "HOOK_POOL_CPU_THRESHOLD").unwrap_or(80.0),
            redis_list_prefix: raw(config, "HOOK_POOL_REDIS_LIST_PREFIX")
                .unwrap_or_else(|| "{hook}".to_string()),
            redis_log_channel: raw(config, "HOOK_POOL_REDIS_LOG_CHANNEL")
                .unwrap_or_else(|| "hook:logs".to_string()),
            redis_block_timeout_secs: parsed(config, "HOOK_POOL_REDIS_BLOCK_TIMEOUT").unwrap_or(5),
            redis_max_retries: parsed(config, "HOOK_POOL_REDIS_MAX_RETRIES").unwrap_or(3),
            // A fresh UUIDv4 per process keeps worker ids unique when unset.
            worker_id: raw(config, "HOOK_POOL_WORKER_ID")
                .unwrap_or_else(|| uuid::Uuid::new_v4().to_string()),
        }
    }
}
impl Default for PoolConfig {
    /// Hard-coded defaults mirroring the fallbacks used by `from_env`.
    fn default() -> Self {
        // Concurrency defaults to the number of logical CPUs on this host.
        let max_concurrent = num_cpus::get();
        // Each default instance gets its own random worker identity.
        let worker_id = uuid::Uuid::new_v4().to_string();
        Self {
            max_concurrent,
            cpu_threshold: 80.0,
            redis_list_prefix: String::from("{hook}"),
            redis_log_channel: String::from("hook:logs"),
            redis_block_timeout_secs: 5,
            redis_max_retries: 3,
            worker_id,
        }
    }
}