fix(git): add LFS upload size limits and fix HTTP rate limiter read/write counters

- Add LFS_MAX_OBJECT_SIZE (50 GiB) and validate object sizes in both the
  batch advisory check and the upload_object streaming loop to prevent
  unbounded disk usage from malicious clients
- Fix HTTP rate limiter: track read_count and write_count separately so
  a burst of writes cannot exhaust the read budget (previously all
  operations incremented read_count regardless of type)
This commit is contained in:
ZhenYi 2026-04-16 20:14:13 +08:00
parent cef4ff1289
commit f5ab554d6b
2 changed files with 40 additions and 7 deletions

View File

@ -12,6 +12,8 @@ use std::collections::HashMap;
use std::path::PathBuf; use std::path::PathBuf;
const LFS_AUTH_TOKEN_EXPIRY: u64 = 3600; const LFS_AUTH_TOKEN_EXPIRY: u64 = 3600;
/// Maximum LFS object size in bytes (50 GiB, matching GitHub/Gitea default).
/// Requests whose declared or streamed size exceeds this are rejected,
/// bounding worst-case disk usage per object.
const LFS_MAX_OBJECT_SIZE: i64 = 50 * 1024 * 1024 * 1024;
#[derive(Deserialize, Serialize)] #[derive(Deserialize, Serialize)]
pub struct BatchRequest { pub struct BatchRequest {
@ -128,6 +130,15 @@ impl LfsHandler {
))); )));
} }
for obj in &req.objects {
if obj.size > LFS_MAX_OBJECT_SIZE {
return Err(GitError::InvalidOid(format!(
"Object size {} exceeds maximum allowed size {}",
obj.size, LFS_MAX_OBJECT_SIZE
)));
}
}
let oids: Vec<&str> = req.objects.iter().map(|o| o.oid.as_str()).collect(); let oids: Vec<&str> = req.objects.iter().map(|o| o.oid.as_str()).collect();
// Single batch query for all OIDs // Single batch query for all OIDs
@ -260,6 +271,15 @@ impl LfsHandler {
while let Some(chunk) = payload.next().await { while let Some(chunk) = payload.next().await {
let chunk = chunk.map_err(|e| GitError::Internal(format!("Payload error: {}", e)))?; let chunk = chunk.map_err(|e| GitError::Internal(format!("Payload error: {}", e)))?;
size += chunk.len() as i64; size += chunk.len() as i64;
// Hard limit: abort if we exceed the max LFS object size.
// This prevents unbounded disk usage from a malicious or misbehaving client.
if size > LFS_MAX_OBJECT_SIZE {
let _ = tokio::fs::remove_file(&temp_path).await;
return Err(GitError::InvalidOid(format!(
"Object size exceeds maximum allowed size {}",
LFS_MAX_OBJECT_SIZE
)));
}
hasher.update(&chunk); hasher.update(&chunk);
if let Err(e) = file.write_all(&chunk).await { if let Err(e) = file.write_all(&chunk).await {
let _ = tokio::fs::remove_file(&temp_path).await; let _ = tokio::fs::remove_file(&temp_path).await;

View File

@ -36,6 +36,12 @@ struct RateLimitBucket {
reset_time: Instant, reset_time: Instant,
} }
/// Selects which per-window counter a rate-limit check reads and increments;
/// read and write budgets are tracked independently within the same bucket,
/// so a burst of writes cannot exhaust the read allowance.
#[derive(Clone, Copy)]
enum BucketOp {
    Read,
    Write,
}
pub struct RateLimiter { pub struct RateLimiter {
buckets: Arc<RwLock<HashMap<String, RateLimitBucket>>>, buckets: Arc<RwLock<HashMap<String, RateLimitBucket>>>,
config: RateLimitConfig, config: RateLimitConfig,
@ -51,23 +57,23 @@ impl RateLimiter {
pub async fn is_ip_read_allowed(&self, ip: &str) -> bool { pub async fn is_ip_read_allowed(&self, ip: &str) -> bool {
let key = format!("ip:read:{}", ip); let key = format!("ip:read:{}", ip);
self.is_allowed(&key, self.config.read_requests_per_window) self.is_allowed(&key, BucketOp::Read, self.config.read_requests_per_window)
.await .await
} }
pub async fn is_ip_write_allowed(&self, ip: &str) -> bool { pub async fn is_ip_write_allowed(&self, ip: &str) -> bool {
let key = format!("ip:write:{}", ip); let key = format!("ip:write:{}", ip);
self.is_allowed(&key, self.config.write_requests_per_window) self.is_allowed(&key, BucketOp::Write, self.config.write_requests_per_window)
.await .await
} }
pub async fn is_repo_write_allowed(&self, ip: &str, repo_path: &str) -> bool { pub async fn is_repo_write_allowed(&self, ip: &str, repo_path: &str) -> bool {
let key = format!("repo:write:{}:{}", ip, repo_path); let key = format!("repo:write:{}:{}", ip, repo_path);
self.is_allowed(&key, self.config.write_requests_per_window) self.is_allowed(&key, BucketOp::Write, self.config.write_requests_per_window)
.await .await
} }
async fn is_allowed(&self, key: &str, limit: u32) -> bool { async fn is_allowed(&self, key: &str, op: BucketOp, limit: u32) -> bool {
let now = Instant::now(); let now = Instant::now();
let mut buckets = self.buckets.write().await; let mut buckets = self.buckets.write().await;
@ -85,12 +91,19 @@ impl RateLimiter {
bucket.reset_time = now + Duration::from_secs(self.config.window_secs); bucket.reset_time = now + Duration::from_secs(self.config.window_secs);
} }
// Use read_count for both read/write since we don't distinguish in bucket let over_limit = match op {
if bucket.read_count >= limit { BucketOp::Read => bucket.read_count >= limit,
BucketOp::Write => bucket.write_count >= limit,
};
if over_limit {
return false; return false;
} }
bucket.read_count += 1; match op {
BucketOp::Read => bucket.read_count += 1,
BucketOp::Write => bucket.write_count += 1,
}
true true
} }