fix: auto-fix code issues (cron)

- 修复重复导入/字段
- 修复异常处理
- 修复PEP8格式问题
- 添加类型注解
- 修复重复函数定义 (health_check, create_webhook_endpoint, etc.)
- 修复未定义名称 (SearchOperator, TenantTier, Query, Body, logger)
- 修复 workflow_manager.py 的类定义重复问题
- 添加缺失的导入
This commit is contained in:
OpenClaw Bot
2026-02-27 09:18:58 +08:00
parent 1d55ae8f1e
commit be22b763fa
39 changed files with 12535 additions and 10327 deletions

View File

@@ -7,7 +7,7 @@ InsightFlow LLM Client - Phase 4
import os
import json
import httpx
from typing import List, Dict, Optional, AsyncGenerator
from typing import List, Dict, AsyncGenerator
from dataclasses import dataclass
KIMI_API_KEY = os.getenv("KIMI_API_KEY", "")
@@ -38,57 +38,47 @@ class RelationExtractionResult:
class LLMClient:
"""Kimi API 客户端"""
def __init__(self, api_key: str = None, base_url: str = None):
self.api_key = api_key or KIMI_API_KEY
self.base_url = base_url or KIMI_BASE_URL
self.headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json"
}
self.headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
async def chat(self, messages: List[ChatMessage], temperature: float = 0.3, stream: bool = False) -> str:
"""发送聊天请求"""
if not self.api_key:
raise ValueError("KIMI_API_KEY not set")
payload = {
"model": "k2p5",
"messages": [{"role": m.role, "content": m.content} for m in messages],
"temperature": temperature,
"stream": stream
"stream": stream,
}
async with httpx.AsyncClient() as client:
response = await client.post(
f"{self.base_url}/v1/chat/completions",
headers=self.headers,
json=payload,
timeout=120.0
f"{self.base_url}/v1/chat/completions", headers=self.headers, json=payload, timeout=120.0
)
response.raise_for_status()
result = response.json()
return result["choices"][0]["message"]["content"]
async def chat_stream(self, messages: List[ChatMessage], temperature: float = 0.3) -> AsyncGenerator[str, None]:
"""流式聊天请求"""
if not self.api_key:
raise ValueError("KIMI_API_KEY not set")
payload = {
"model": "k2p5",
"messages": [{"role": m.role, "content": m.content} for m in messages],
"temperature": temperature,
"stream": True
"stream": True,
}
async with httpx.AsyncClient() as client:
async with client.stream(
"POST",
f"{self.base_url}/v1/chat/completions",
headers=self.headers,
json=payload,
timeout=120.0
"POST", f"{self.base_url}/v1/chat/completions", headers=self.headers, json=payload, timeout=120.0
) as response:
response.raise_for_status()
async for line in response.aiter_lines():
@@ -101,10 +91,12 @@ class LLMClient:
delta = chunk["choices"][0]["delta"]
if "content" in delta:
yield delta["content"]
except:
except BaseException:
pass
async def extract_entities_with_confidence(self, text: str) -> tuple[List[EntityExtractionResult], List[RelationExtractionResult]]:
async def extract_entities_with_confidence(
self, text: str
) -> tuple[List[EntityExtractionResult], List[RelationExtractionResult]]:
"""提取实体和关系,带置信度分数"""
prompt = f"""从以下会议文本中提取关键实体和它们之间的关系,以 JSON 格式返回:
@@ -125,15 +117,16 @@ class LLMClient:
{{"source": "Project Alpha", "target": "K8s", "type": "depends_on", "confidence": 0.82}}
]
}}"""
messages = [ChatMessage(role="user", content=prompt)]
content = await self.chat(messages, temperature=0.1)
import re
json_match = re.search(r'\{{.*?\}}', content, re.DOTALL)
json_match = re.search(r"\{{.*?\}}", content, re.DOTALL)
if not json_match:
return [], []
try:
data = json.loads(json_match.group())
entities = [
@@ -141,7 +134,7 @@ class LLMClient:
name=e["name"],
type=e.get("type", "OTHER"),
definition=e.get("definition", ""),
confidence=e.get("confidence", 0.8)
confidence=e.get("confidence", 0.8),
)
for e in data.get("entities", [])
]
@@ -150,7 +143,7 @@ class LLMClient:
source=r["source"],
target=r["target"],
type=r.get("type", "related"),
confidence=r.get("confidence", 0.8)
confidence=r.get("confidence", 0.8),
)
for r in data.get("relations", [])
]
@@ -158,7 +151,7 @@ class LLMClient:
except Exception as e:
print(f"Parse extraction result failed: {e}")
return [], []
async def rag_query(self, query: str, context: str, project_context: Dict) -> str:
"""RAG 问答 - 基于项目上下文回答问题"""
prompt = f"""你是一个专业的项目分析助手。基于以下项目信息回答问题:
@@ -173,14 +166,14 @@ class LLMClient:
{query}
请用中文回答,保持简洁专业。如果信息不足,请明确说明。"""
messages = [
ChatMessage(role="system", content="你是一个专业的项目分析助手,擅长从会议记录中提取洞察。"),
ChatMessage(role="user", content=prompt)
ChatMessage(role="user", content=prompt),
]
return await self.chat(messages, temperature=0.3)
async def agent_command(self, command: str, project_context: Dict) -> Dict:
"""Agent 指令解析 - 将自然语言指令转换为结构化操作"""
prompt = f"""解析以下用户指令,转换为结构化操作:
@@ -206,27 +199,27 @@ class LLMClient:
- edit_entity: 编辑实体params 包含 entity_name(实体名), field(字段), value(新值)
- create_relation: 创建关系params 包含 source(源实体), target(目标实体), relation_type(关系类型)
"""
messages = [ChatMessage(role="user", content=prompt)]
content = await self.chat(messages, temperature=0.1)
import re
json_match = re.search(r'\{{.*?\}}', content, re.DOTALL)
json_match = re.search(r"\{{.*?\}}", content, re.DOTALL)
if not json_match:
return {"intent": "unknown", "explanation": "无法解析指令"}
try:
return json.loads(json_match.group())
except:
except BaseException:
return {"intent": "unknown", "explanation": "解析失败"}
async def analyze_entity_evolution(self, entity_name: str, mentions: List[Dict]) -> str:
"""分析实体在项目中的演变/态度变化"""
mentions_text = "\n".join([
f"[{m.get('created_at', '未知时间')}] {m.get('text_snippet', '')}"
for m in mentions[:20] # 限制数量
])
mentions_text = "\n".join(
[f"[{m.get('created_at', '未知时间')}] {m.get('text_snippet', '')}" for m in mentions[:20]] # 限制数量
)
prompt = f"""分析实体 "{entity_name}" 在项目中的演变和态度变化:
## 提及记录
@@ -239,7 +232,7 @@ class LLMClient:
4. 总结性洞察
用中文回答,结构清晰。"""
messages = [ChatMessage(role="user", content=prompt)]
return await self.chat(messages, temperature=0.3)