diff --git a/libs/git/http/lfs.rs b/libs/git/http/lfs.rs
index 2c2d939..11fc622 100644
--- a/libs/git/http/lfs.rs
+++ b/libs/git/http/lfs.rs
@@ -12,6 +12,8 @@ use std::collections::HashMap;
 use std::path::PathBuf;
 
 const LFS_AUTH_TOKEN_EXPIRY: u64 = 3600;
+/// Maximum LFS object size in bytes (50 GiB, matching GitHub/Gitea default).
+const LFS_MAX_OBJECT_SIZE: i64 = 50 * 1024 * 1024 * 1024;
 
 #[derive(Deserialize, Serialize)]
 pub struct BatchRequest {
@@ -128,6 +130,15 @@ impl LfsHandler {
             )));
         }
 
+        for obj in &req.objects {
+            if obj.size > LFS_MAX_OBJECT_SIZE {
+                return Err(GitError::InvalidOid(format!(
+                    "Object size {} exceeds maximum allowed size {}",
+                    obj.size, LFS_MAX_OBJECT_SIZE
+                )));
+            }
+        }
+
         let oids: Vec<&str> = req.objects.iter().map(|o| o.oid.as_str()).collect();
 
         // Single batch query for all OIDs
@@ -260,6 +271,15 @@ impl LfsHandler {
         while let Some(chunk) = payload.next().await {
             let chunk = chunk.map_err(|e| GitError::Internal(format!("Payload error: {}", e)))?;
             size += chunk.len() as i64;
+            // Hard limit: abort if we exceed the max LFS object size.
+            // This prevents unbounded disk usage from a malicious or misbehaving client.
+            if size > LFS_MAX_OBJECT_SIZE {
+                let _ = tokio::fs::remove_file(&temp_path).await;
+                return Err(GitError::InvalidOid(format!(
+                    "Object size exceeds maximum allowed size {}",
+                    LFS_MAX_OBJECT_SIZE
+                )));
+            }
             hasher.update(&chunk);
             if let Err(e) = file.write_all(&chunk).await {
                 let _ = tokio::fs::remove_file(&temp_path).await;
diff --git a/libs/git/http/rate_limit.rs b/libs/git/http/rate_limit.rs
index b230030..88f508e 100644
--- a/libs/git/http/rate_limit.rs
+++ b/libs/git/http/rate_limit.rs
@@ -36,6 +36,12 @@ struct RateLimitBucket {
     reset_time: Instant,
 }
 
+#[derive(Clone, Copy)]
+enum BucketOp {
+    Read,
+    Write,
+}
+
 pub struct RateLimiter {
     buckets: Arc<RwLock<HashMap<String, RateLimitBucket>>>,
     config: RateLimitConfig,
@@ -51,23 +57,23 @@ impl RateLimiter {
 
     pub async fn is_ip_read_allowed(&self, ip: &str) -> bool {
         let key = format!("ip:read:{}", ip);
-        self.is_allowed(&key, self.config.read_requests_per_window)
+        self.is_allowed(&key, BucketOp::Read, self.config.read_requests_per_window)
             .await
     }
 
     pub async fn is_ip_write_allowed(&self, ip: &str) -> bool {
         let key = format!("ip:write:{}", ip);
-        self.is_allowed(&key, self.config.write_requests_per_window)
+        self.is_allowed(&key, BucketOp::Write, self.config.write_requests_per_window)
             .await
     }
 
     pub async fn is_repo_write_allowed(&self, ip: &str, repo_path: &str) -> bool {
         let key = format!("repo:write:{}:{}", ip, repo_path);
-        self.is_allowed(&key, self.config.write_requests_per_window)
+        self.is_allowed(&key, BucketOp::Write, self.config.write_requests_per_window)
             .await
     }
 
-    async fn is_allowed(&self, key: &str, limit: u32) -> bool {
+    async fn is_allowed(&self, key: &str, op: BucketOp, limit: u32) -> bool {
         let now = Instant::now();
         let mut buckets = self.buckets.write().await;
 
@@ -85,12 +91,19 @@ impl RateLimiter {
             bucket.reset_time = now + Duration::from_secs(self.config.window_secs);
         }
 
-        // Use read_count for both read/write since we don't distinguish in bucket
-        if bucket.read_count >= limit {
+        let over_limit = match op {
+            BucketOp::Read => bucket.read_count >= limit,
+            BucketOp::Write => bucket.write_count >= limit,
+        };
+
+        if over_limit {
             return false;
         }
 
-        bucket.read_count += 1;
+        match op {
+            BucketOp::Read => bucket.read_count += 1,
+            BucketOp::Write => bucket.write_count += 1,
+        }
         true
     }
 