//! AI request/response types and chat service exports.
use std::collections::HashMap;
use std::pin::Pin;

use async_openai::types::chat::ChatCompletionTool;
use uuid::Uuid;

use db::cache::AppCache;
use db::database::AppDatabase;
use models::agents::model;
use models::projects::project;
use models::repos::repo;
use models::rooms::{room, room_message};
use models::users::user;
|
|
|
|
/// Maximum recursion rounds for tool-call loops (AI → tool → result → AI).
///
/// Used when a caller does not supply an explicit `max_tool_depth`.
pub const DEFAULT_MAX_TOOL_DEPTH: usize = 3;
|
|
|
|
/// A single chunk from an AI streaming response.
#[derive(Debug, Clone)]
pub struct AiStreamChunk {
    /// Text content carried by this chunk.
    pub content: String,
    /// `true` when this is the final chunk and the stream has ended.
    pub done: bool,
}
|
|
|
|
/// Optional streaming callback: called for each token chunk.
///
/// The callback takes an [`AiStreamChunk`] and returns a pinned, boxed future
/// so that async work (e.g. forwarding the chunk to a client) can be awaited
/// per chunk. `Send + Sync` bounds allow the callback to be shared across
/// async task boundaries.
pub type StreamCallback = Box<
    dyn Fn(AiStreamChunk) -> Pin<Box<dyn std::future::Future<Output = ()> + Send>> + Send + Sync,
>;
|
|
|
|
/// Full input bundle for a single AI chat request in a room.
pub struct AiRequest {
    /// Database handle used while servicing the request.
    pub db: AppDatabase,
    /// Application cache handle.
    pub cache: AppCache,
    /// The AI agent/model record the request targets.
    pub model: model::Model,
    /// Project the conversation belongs to.
    pub project: project::Model,
    /// User who sent the triggering message.
    pub sender: user::Model,
    /// Room the conversation takes place in.
    pub room: room::Model,
    /// Raw text of the user's message.
    pub input: String,
    /// Entities (users/repos) mentioned in the input.
    pub mention: Vec<Mention>,
    /// Prior room messages providing conversation history.
    pub history: Vec<room_message::Model>,
    /// Optional user name mapping: user_id -> username
    pub user_names: HashMap<Uuid, String>,
    // Sampling parameters — presumably forwarded to the model with
    // OpenAI-style semantics (this file uses async_openai types); confirm in
    // the service implementation.
    pub temperature: f64,
    pub max_tokens: i32,
    pub top_p: f64,
    pub frequency_penalty: f64,
    pub presence_penalty: f64,
    /// Whether to enable the model's "thinking"/reasoning mode.
    pub think: bool,
    /// OpenAI tool definitions. If None or empty, tool calling is disabled.
    pub tools: Option<Vec<ChatCompletionTool>>,
    /// Maximum tool-call recursion depth (AI → tool → result → AI loops). Default: 3.
    /// See [`DEFAULT_MAX_TOOL_DEPTH`].
    pub max_tool_depth: usize,
}
|
|
|
|
/// An entity referenced (mentioned) in the user's input.
pub enum Mention {
    /// A mentioned user.
    User(user::Model),
    /// A mentioned repository.
    Repo(repo::Model),
}
|
|
|
|
// Submodules: message/context assembly and the chat service implementation.
pub mod context;
pub mod service;

// Re-export the commonly used items at this module's root for convenience.
pub use context::{AiContextSenderType, RoomMessageContext};
pub use service::ChatService;