//! Embedding client combining the OpenAI embeddings API with Qdrant vector storage.
use async_openai::Client;
use async_openai::types::embeddings::CreateEmbeddingRequestArgs;
use serde::{Deserialize, Serialize};

use crate::embed::qdrant::QdrantClient;
/// Client that embeds text via OpenAI and stores/queries vectors in Qdrant.
pub struct EmbedClient {
    /// OpenAI API client used for the embeddings endpoint.
    openai: Client<async_openai::config::OpenAIConfig>,
    /// Qdrant client used for vector upsert, search, and deletion.
    qdrant: QdrantClient,
}
/// A single point to be upserted into Qdrant: id, embedding, and payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EmbedVector {
    /// Point identifier (unique within the collection).
    pub id: String,
    /// The embedding vector produced by the model.
    pub vector: Vec<f32>,
    /// Metadata stored alongside the vector.
    pub payload: EmbedPayload,
}
/// Payload attached to each stored vector; used for filtering search results.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EmbedPayload {
    /// Kind of entity this vector represents (e.g. "memory", "skill").
    pub entity_type: String,
    /// Identifier of the owning entity (e.g. a room id or project uuid).
    pub entity_id: String,
    /// The original text that was embedded.
    pub text: String,
    /// Optional free-form extra metadata; omitted from JSON when `None`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub extra: Option<serde_json::Value>,
}
/// A single hit returned from a Qdrant similarity search.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SearchResult {
    /// Identifier of the matched point.
    pub id: String,
    /// Similarity score reported by Qdrant (higher is more similar).
    pub score: f32,
    /// Payload stored with the matched point.
    pub payload: EmbedPayload,
}
impl EmbedClient {
|
|
pub fn new(openai: Client<async_openai::config::OpenAIConfig>, qdrant: QdrantClient) -> Self {
|
|
Self { openai, qdrant }
|
|
}
|
|
|
|
pub async fn embed_text(&self, text: &str, model: &str) -> crate::Result<Vec<f32>> {
|
|
let request = CreateEmbeddingRequestArgs::default()
|
|
.model(model)
|
|
.input(text)
|
|
.build()
|
|
.map_err(|e| crate::AgentError::OpenAi(e.to_string()))?;
|
|
|
|
let response = self
|
|
.openai
|
|
.embeddings()
|
|
.create(request)
|
|
.await
|
|
.map_err(|e| crate::AgentError::OpenAi(e.to_string()))?;
|
|
|
|
response
|
|
.data
|
|
.first()
|
|
.map(|d| d.embedding.clone())
|
|
.ok_or_else(|| crate::AgentError::OpenAi("no embedding returned".into()))
|
|
}
|
|
|
|
pub async fn embed_batch(&self, texts: &[String], model: &str) -> crate::Result<Vec<Vec<f32>>> {
|
|
let request = CreateEmbeddingRequestArgs::default()
|
|
.model(model)
|
|
.input(texts.to_vec())
|
|
.build()
|
|
.map_err(|e| crate::AgentError::OpenAi(e.to_string()))?;
|
|
|
|
let response = self
|
|
.openai
|
|
.embeddings()
|
|
.create(request)
|
|
.await
|
|
.map_err(|e| crate::AgentError::OpenAi(e.to_string()))?;
|
|
|
|
let mut embeddings = vec![Vec::new(); texts.len()];
|
|
for data in response.data {
|
|
if (data.index as usize) < embeddings.len() {
|
|
embeddings[data.index as usize] = data.embedding;
|
|
}
|
|
}
|
|
Ok(embeddings)
|
|
}
|
|
|
|
pub async fn upsert(&self, points: Vec<EmbedVector>) -> crate::Result<()> {
|
|
self.qdrant.upsert_points(points).await
|
|
}
|
|
|
|
pub async fn search(
|
|
&self,
|
|
query: &str,
|
|
entity_type: &str,
|
|
model: &str,
|
|
limit: usize,
|
|
) -> crate::Result<Vec<SearchResult>> {
|
|
let vector = self.embed_text(query, model).await?;
|
|
self.qdrant.search(&vector, entity_type, limit).await
|
|
}
|
|
|
|
pub async fn search_with_filter(
|
|
&self,
|
|
query: &str,
|
|
entity_type: &str,
|
|
model: &str,
|
|
limit: usize,
|
|
filter: qdrant_client::qdrant::Filter,
|
|
) -> crate::Result<Vec<SearchResult>> {
|
|
let vector = self.embed_text(query, model).await?;
|
|
self.qdrant
|
|
.search_with_filter(&vector, entity_type, limit, filter)
|
|
.await
|
|
}
|
|
|
|
pub async fn delete_by_entity_id(
|
|
&self,
|
|
entity_type: &str,
|
|
entity_id: &str,
|
|
) -> crate::Result<()> {
|
|
self.qdrant.delete_by_filter(entity_type, entity_id).await
|
|
}
|
|
|
|
pub async fn ensure_collection(&self, entity_type: &str, dimensions: u64) -> crate::Result<()> {
|
|
self.qdrant.ensure_collection(entity_type, dimensions).await
|
|
}
|
|
|
|
pub async fn ensure_memory_collection(&self, dimensions: u64) -> crate::Result<()> {
|
|
self.qdrant.ensure_memory_collection(dimensions).await
|
|
}
|
|
|
|
pub async fn ensure_skill_collection(&self, dimensions: u64) -> crate::Result<()> {
|
|
self.qdrant.ensure_skill_collection(dimensions).await
|
|
}
|
|
|
|
/// Embed and store a conversation memory (message) in Qdrant.
|
|
pub async fn embed_memory(
|
|
&self,
|
|
id: &str,
|
|
text: &str,
|
|
room_id: &str,
|
|
user_id: Option<&str>,
|
|
) -> crate::Result<()> {
|
|
let vector = self.embed_text(text, "").await?;
|
|
let point = EmbedVector {
|
|
id: id.to_string(),
|
|
vector,
|
|
payload: EmbedPayload {
|
|
entity_type: "memory".to_string(),
|
|
entity_id: room_id.to_string(),
|
|
text: text.to_string(),
|
|
extra: serde_json::json!({ "user_id": user_id }).into(),
|
|
},
|
|
};
|
|
self.qdrant.upsert_points(vec![point]).await
|
|
}
|
|
|
|
/// Search memory embeddings by semantic similarity within a room.
|
|
pub async fn search_memories(
|
|
&self,
|
|
query: &str,
|
|
model: &str,
|
|
room_id: &str,
|
|
limit: usize,
|
|
) -> crate::Result<Vec<SearchResult>> {
|
|
let vector = self.embed_text(query, model).await?;
|
|
let mut results = self.qdrant.search_memory(&vector, limit + 1).await?;
|
|
// Filter to the specific room
|
|
results.retain(|r| r.payload.entity_id == room_id);
|
|
results.truncate(limit);
|
|
Ok(results)
|
|
}
|
|
|
|
/// Embed and store a skill in Qdrant.
|
|
pub async fn embed_skill(
|
|
&self,
|
|
id: &str,
|
|
name: &str,
|
|
description: &str,
|
|
content: &str,
|
|
project_uuid: &str,
|
|
) -> crate::Result<()> {
|
|
let text = format!("{}: {} {}", name, description, content);
|
|
let vector = self.embed_text(&text, "").await?;
|
|
let point = EmbedVector {
|
|
id: id.to_string(),
|
|
vector,
|
|
payload: EmbedPayload {
|
|
entity_type: "skill".to_string(),
|
|
entity_id: project_uuid.to_string(),
|
|
text,
|
|
extra: serde_json::json!({ "name": name, "description": description }).into(),
|
|
},
|
|
};
|
|
self.qdrant.upsert_points(vec![point]).await
|
|
}
|
|
|
|
/// Search skill embeddings by semantic similarity within a project.
|
|
pub async fn search_skills(
|
|
&self,
|
|
query: &str,
|
|
model: &str,
|
|
project_uuid: &str,
|
|
limit: usize,
|
|
) -> crate::Result<Vec<SearchResult>> {
|
|
let vector = self.embed_text(query, model).await?;
|
|
let mut results = self.qdrant.search_skill(&vector, limit + 1).await?;
|
|
results.retain(|r| r.payload.entity_id == project_uuid);
|
|
results.truncate(limit);
|
|
Ok(results)
|
|
}
|
|
}