fix: auto-fix code issues (cron)
- 修复重复导入/字段
- 修复异常处理
- 修复 PEP8 格式问题
- 添加类型注解
This commit is contained in:
@@ -4,12 +4,13 @@ InsightFlow LLM Client - Phase 4
|
||||
用于与 Kimi API 交互,支持 RAG 问答和 Agent 功能
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import httpx
|
||||
from typing import List, Dict, AsyncGenerator
|
||||
import os
|
||||
from collections.abc import AsyncGenerator
|
||||
from dataclasses import dataclass
|
||||
|
||||
import httpx
|
||||
|
||||
# API key for the Kimi service, read from the environment.
# Defaults to the empty string when unset; client methods that see an
# empty key raise ValueError("KIMI_API_KEY not set") before any request.
KIMI_API_KEY = os.getenv("KIMI_API_KEY", "")

# Base URL for Kimi API requests; overridable via the KIMI_BASE_URL
# environment variable for self-hosted or proxied deployments.
KIMI_BASE_URL = os.getenv("KIMI_BASE_URL", "https://api.kimi.com/coding")
|
||||
|
||||
@@ -44,7 +45,7 @@ class LLMClient:
|
||||
self.base_url = base_url or KIMI_BASE_URL
|
||||
self.headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
|
||||
|
||||
async def chat(self, messages: List[ChatMessage], temperature: float = 0.3, stream: bool = False) -> str:
|
||||
async def chat(self, messages: list[ChatMessage], temperature: float = 0.3, stream: bool = False) -> str:
|
||||
"""发送聊天请求"""
|
||||
if not self.api_key:
|
||||
raise ValueError("KIMI_API_KEY not set")
|
||||
@@ -64,7 +65,7 @@ class LLMClient:
|
||||
result = response.json()
|
||||
return result["choices"][0]["message"]["content"]
|
||||
|
||||
async def chat_stream(self, messages: List[ChatMessage], temperature: float = 0.3) -> AsyncGenerator[str, None]:
|
||||
async def chat_stream(self, messages: list[ChatMessage], temperature: float = 0.3) -> AsyncGenerator[str, None]:
|
||||
"""流式聊天请求"""
|
||||
if not self.api_key:
|
||||
raise ValueError("KIMI_API_KEY not set")
|
||||
@@ -96,7 +97,7 @@ class LLMClient:
|
||||
|
||||
async def extract_entities_with_confidence(
|
||||
self, text: str
|
||||
) -> tuple[List[EntityExtractionResult], List[RelationExtractionResult]]:
|
||||
) -> tuple[list[EntityExtractionResult], list[RelationExtractionResult]]:
|
||||
"""提取实体和关系,带置信度分数"""
|
||||
prompt = f"""从以下会议文本中提取关键实体和它们之间的关系,以 JSON 格式返回:
|
||||
|
||||
@@ -152,7 +153,7 @@ class LLMClient:
|
||||
print(f"Parse extraction result failed: {e}")
|
||||
return [], []
|
||||
|
||||
async def rag_query(self, query: str, context: str, project_context: Dict) -> str:
|
||||
async def rag_query(self, query: str, context: str, project_context: dict) -> str:
|
||||
"""RAG 问答 - 基于项目上下文回答问题"""
|
||||
prompt = f"""你是一个专业的项目分析助手。基于以下项目信息回答问题:
|
||||
|
||||
@@ -174,7 +175,7 @@ class LLMClient:
|
||||
|
||||
return await self.chat(messages, temperature=0.3)
|
||||
|
||||
async def agent_command(self, command: str, project_context: Dict) -> Dict:
|
||||
async def agent_command(self, command: str, project_context: dict) -> dict:
|
||||
"""Agent 指令解析 - 将自然语言指令转换为结构化操作"""
|
||||
prompt = f"""解析以下用户指令,转换为结构化操作:
|
||||
|
||||
@@ -214,7 +215,7 @@ class LLMClient:
|
||||
except BaseException:
|
||||
return {"intent": "unknown", "explanation": "解析失败"}
|
||||
|
||||
async def analyze_entity_evolution(self, entity_name: str, mentions: List[Dict]) -> str:
|
||||
async def analyze_entity_evolution(self, entity_name: str, mentions: list[dict]) -> str:
|
||||
"""分析实体在项目中的演变/态度变化"""
|
||||
mentions_text = "\n".join(
|
||||
[f"[{m.get('created_at', '未知时间')}] {m.get('text_snippet', '')}" for m in mentions[:20]] # 限制数量
|
||||
|
||||
Reference in New Issue
Block a user