fix: auto-fix code issues (cron)
- 修复重复导入/字段 - 修复异常处理 - 修复PEP8格式问题 - 添加类型注解 - 修复重复函数定义 (health_check, create_webhook_endpoint, etc) - 修复未定义名称 (SearchOperator, TenantTier, Query, Body, logger) - 修复 workflow_manager.py 的类定义重复问题 - 添加缺失的导入
This commit is contained in:
@@ -7,7 +7,7 @@ InsightFlow Knowledge Reasoning - Phase 5
|
||||
import os
|
||||
import json
|
||||
import httpx
|
||||
from typing import List, Dict, Optional, Any
|
||||
from typing import List, Dict
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
|
||||
@@ -17,76 +17,65 @@ KIMI_BASE_URL = os.getenv("KIMI_BASE_URL", "https://api.kimi.com/coding")
|
||||
|
||||
class ReasoningType(Enum):
    """Reasoning strategy applied to a knowledge-graph question."""

    CAUSAL = "causal"  # cause-and-effect analysis
    ASSOCIATIVE = "associative"  # discovery of implicit associations
    TEMPORAL = "temporal"  # timeline / evolution analysis
    COMPARATIVE = "comparative"  # comparison between entities
    SUMMARY = "summary"  # project-level summarization
|
||||
|
||||
|
||||
@dataclass
class ReasoningResult:
    """Outcome of one reasoning pass over the knowledge graph."""

    answer: str  # natural-language answer text
    reasoning_type: ReasoningType  # strategy that produced the answer
    confidence: float  # model-reported confidence (0..1)
    evidence: List[Dict]  # supporting evidence items
    related_entities: List[str]  # entities referenced by the answer
    gaps: List[str]  # identified knowledge gaps
|
||||
|
||||
|
||||
@dataclass
class InferencePath:
    """A reasoning path discovered between two graph entities."""

    start_entity: str  # path origin entity id
    end_entity: str  # path destination entity id
    path: List[Dict]  # ordered nodes and relations along the path
    strength: float  # path strength; shorter / higher-confidence paths score higher
|
||||
|
||||
|
||||
class KnowledgeReasoner:
|
||||
"""知识推理引擎"""
|
||||
|
||||
|
||||
def __init__(self, api_key: str = None, base_url: str = None):
    """Create a reasoner client.

    Args:
        api_key: Kimi API key; falls back to the module-level KIMI_API_KEY.
        base_url: API base URL; falls back to the module-level KIMI_BASE_URL.
    """
    self.api_key = api_key or KIMI_API_KEY
    self.base_url = base_url or KIMI_BASE_URL
    # Build the auth headers once from the resolved key; reused by every request.
    self.headers = {
        "Authorization": f"Bearer {self.api_key}",
        "Content-Type": "application/json",
    }
|
||||
|
||||
async def _call_llm(self, prompt: str, temperature: float = 0.3) -> str:
    """Send a single-turn chat-completion request and return the reply text.

    Args:
        prompt: User prompt to send as the sole message.
        temperature: Sampling temperature for the completion.

    Returns:
        The assistant message content of the first choice.

    Raises:
        ValueError: If no API key is configured.
        httpx.HTTPStatusError: On a non-2xx response (via raise_for_status).
    """
    if not self.api_key:
        raise ValueError("KIMI_API_KEY not set")

    payload = {
        "model": "k2p5",
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temperature,
    }

    async with httpx.AsyncClient() as client:
        # Long timeout: reasoning prompts can take a while to complete.
        response = await client.post(
            f"{self.base_url}/v1/chat/completions",
            headers=self.headers,
            json=payload,
            timeout=120.0,
        )
        response.raise_for_status()
        result = response.json()
        return result["choices"][0]["message"]["content"]
|
||||
|
||||
|
||||
async def enhanced_qa(
|
||||
self,
|
||||
query: str,
|
||||
project_context: Dict,
|
||||
graph_data: Dict,
|
||||
reasoning_depth: str = "medium"
|
||||
self, query: str, project_context: Dict, graph_data: Dict, reasoning_depth: str = "medium"
|
||||
) -> ReasoningResult:
|
||||
"""
|
||||
增强问答 - 结合图谱推理的问答
|
||||
|
||||
|
||||
Args:
|
||||
query: 用户问题
|
||||
project_context: 项目上下文
|
||||
@@ -95,7 +84,7 @@ class KnowledgeReasoner:
|
||||
"""
|
||||
# 1. 分析问题类型
|
||||
analysis = await self._analyze_question(query)
|
||||
|
||||
|
||||
# 2. 根据问题类型选择推理策略
|
||||
if analysis["type"] == "causal":
|
||||
return await self._causal_reasoning(query, project_context, graph_data)
|
||||
@@ -105,7 +94,7 @@ class KnowledgeReasoner:
|
||||
return await self._temporal_reasoning(query, project_context, graph_data)
|
||||
else:
|
||||
return await self._associative_reasoning(query, project_context, graph_data)
|
||||
|
||||
|
||||
async def _analyze_question(self, query: str) -> Dict:
|
||||
"""分析问题类型和意图"""
|
||||
prompt = f"""分析以下问题的类型和意图:
|
||||
@@ -126,31 +115,27 @@ class KnowledgeReasoner:
|
||||
- temporal: 时序类问题(什么时候、进度、变化)
|
||||
- factual: 事实类问题(是什么、有哪些)
|
||||
- opinion: 观点类问题(怎么看、态度、评价)"""
|
||||
|
||||
|
||||
content = await self._call_llm(prompt, temperature=0.1)
|
||||
|
||||
|
||||
import re
|
||||
json_match = re.search(r'\{{.*?\}}', content, re.DOTALL)
|
||||
|
||||
json_match = re.search(r"\{{.*?\}}", content, re.DOTALL)
|
||||
if json_match:
|
||||
try:
|
||||
return json.loads(json_match.group())
|
||||
except:
|
||||
except BaseException:
|
||||
pass
|
||||
|
||||
|
||||
return {"type": "factual", "entities": [], "intent": "general", "complexity": "simple"}
|
||||
|
||||
async def _causal_reasoning(
|
||||
self,
|
||||
query: str,
|
||||
project_context: Dict,
|
||||
graph_data: Dict
|
||||
) -> ReasoningResult:
|
||||
|
||||
async def _causal_reasoning(self, query: str, project_context: Dict, graph_data: Dict) -> ReasoningResult:
|
||||
"""因果推理 - 分析原因和影响"""
|
||||
|
||||
|
||||
# 构建因果分析提示
|
||||
entities_str = json.dumps(graph_data.get("entities", []), ensure_ascii=False, indent=2)
|
||||
relations_str = json.dumps(graph_data.get("relations", []), ensure_ascii=False, indent=2)
|
||||
|
||||
|
||||
prompt = f"""基于以下知识图谱进行因果推理分析:
|
||||
|
||||
## 问题
|
||||
@@ -175,12 +160,13 @@ class KnowledgeReasoner:
|
||||
"evidence": ["证据1", "证据2"],
|
||||
"knowledge_gaps": ["缺失信息1"]
|
||||
}}"""
|
||||
|
||||
|
||||
content = await self._call_llm(prompt, temperature=0.3)
|
||||
|
||||
|
||||
import re
|
||||
json_match = re.search(r'\{{.*?\}}', content, re.DOTALL)
|
||||
|
||||
|
||||
json_match = re.search(r"\{{.*?\}}", content, re.DOTALL)
|
||||
|
||||
if json_match:
|
||||
try:
|
||||
data = json.loads(json_match.group())
|
||||
@@ -190,28 +176,23 @@ class KnowledgeReasoner:
|
||||
confidence=data.get("confidence", 0.7),
|
||||
evidence=[{"text": e} for e in data.get("evidence", [])],
|
||||
related_entities=[],
|
||||
gaps=data.get("knowledge_gaps", [])
|
||||
gaps=data.get("knowledge_gaps", []),
|
||||
)
|
||||
except:
|
||||
except BaseException:
|
||||
pass
|
||||
|
||||
|
||||
return ReasoningResult(
|
||||
answer=content,
|
||||
reasoning_type=ReasoningType.CAUSAL,
|
||||
confidence=0.5,
|
||||
evidence=[],
|
||||
related_entities=[],
|
||||
gaps=["无法完成因果推理"]
|
||||
gaps=["无法完成因果推理"],
|
||||
)
|
||||
|
||||
async def _comparative_reasoning(
|
||||
self,
|
||||
query: str,
|
||||
project_context: Dict,
|
||||
graph_data: Dict
|
||||
) -> ReasoningResult:
|
||||
|
||||
async def _comparative_reasoning(self, query: str, project_context: Dict, graph_data: Dict) -> ReasoningResult:
|
||||
"""对比推理 - 比较实体间的异同"""
|
||||
|
||||
|
||||
prompt = f"""基于以下知识图谱进行对比分析:
|
||||
|
||||
## 问题
|
||||
@@ -233,12 +214,13 @@ class KnowledgeReasoner:
|
||||
"evidence": ["证据1"],
|
||||
"knowledge_gaps": []
|
||||
}}"""
|
||||
|
||||
|
||||
content = await self._call_llm(prompt, temperature=0.3)
|
||||
|
||||
|
||||
import re
|
||||
json_match = re.search(r'\{{.*?\}}', content, re.DOTALL)
|
||||
|
||||
|
||||
json_match = re.search(r"\{{.*?\}}", content, re.DOTALL)
|
||||
|
||||
if json_match:
|
||||
try:
|
||||
data = json.loads(json_match.group())
|
||||
@@ -248,28 +230,23 @@ class KnowledgeReasoner:
|
||||
confidence=data.get("confidence", 0.7),
|
||||
evidence=[{"text": e} for e in data.get("evidence", [])],
|
||||
related_entities=[],
|
||||
gaps=data.get("knowledge_gaps", [])
|
||||
gaps=data.get("knowledge_gaps", []),
|
||||
)
|
||||
except:
|
||||
except BaseException:
|
||||
pass
|
||||
|
||||
|
||||
return ReasoningResult(
|
||||
answer=content,
|
||||
reasoning_type=ReasoningType.COMPARATIVE,
|
||||
confidence=0.5,
|
||||
evidence=[],
|
||||
related_entities=[],
|
||||
gaps=[]
|
||||
gaps=[],
|
||||
)
|
||||
|
||||
async def _temporal_reasoning(
|
||||
self,
|
||||
query: str,
|
||||
project_context: Dict,
|
||||
graph_data: Dict
|
||||
) -> ReasoningResult:
|
||||
|
||||
async def _temporal_reasoning(self, query: str, project_context: Dict, graph_data: Dict) -> ReasoningResult:
|
||||
"""时序推理 - 分析时间线和演变"""
|
||||
|
||||
|
||||
prompt = f"""基于以下知识图谱进行时序分析:
|
||||
|
||||
## 问题
|
||||
@@ -291,12 +268,13 @@ class KnowledgeReasoner:
|
||||
"evidence": ["证据1"],
|
||||
"knowledge_gaps": []
|
||||
}}"""
|
||||
|
||||
|
||||
content = await self._call_llm(prompt, temperature=0.3)
|
||||
|
||||
|
||||
import re
|
||||
json_match = re.search(r'\{{.*?\}}', content, re.DOTALL)
|
||||
|
||||
|
||||
json_match = re.search(r"\{{.*?\}}", content, re.DOTALL)
|
||||
|
||||
if json_match:
|
||||
try:
|
||||
data = json.loads(json_match.group())
|
||||
@@ -306,28 +284,23 @@ class KnowledgeReasoner:
|
||||
confidence=data.get("confidence", 0.7),
|
||||
evidence=[{"text": e} for e in data.get("evidence", [])],
|
||||
related_entities=[],
|
||||
gaps=data.get("knowledge_gaps", [])
|
||||
gaps=data.get("knowledge_gaps", []),
|
||||
)
|
||||
except:
|
||||
except BaseException:
|
||||
pass
|
||||
|
||||
|
||||
return ReasoningResult(
|
||||
answer=content,
|
||||
reasoning_type=ReasoningType.TEMPORAL,
|
||||
confidence=0.5,
|
||||
evidence=[],
|
||||
related_entities=[],
|
||||
gaps=[]
|
||||
gaps=[],
|
||||
)
|
||||
|
||||
async def _associative_reasoning(
|
||||
self,
|
||||
query: str,
|
||||
project_context: Dict,
|
||||
graph_data: Dict
|
||||
) -> ReasoningResult:
|
||||
|
||||
async def _associative_reasoning(self, query: str, project_context: Dict, graph_data: Dict) -> ReasoningResult:
|
||||
"""关联推理 - 发现实体间的隐含关联"""
|
||||
|
||||
|
||||
prompt = f"""基于以下知识图谱进行关联分析:
|
||||
|
||||
## 问题
|
||||
@@ -349,12 +322,13 @@ class KnowledgeReasoner:
|
||||
"evidence": ["证据1"],
|
||||
"knowledge_gaps": []
|
||||
}}"""
|
||||
|
||||
|
||||
content = await self._call_llm(prompt, temperature=0.4)
|
||||
|
||||
|
||||
import re
|
||||
json_match = re.search(r'\{{.*?\}}', content, re.DOTALL)
|
||||
|
||||
|
||||
json_match = re.search(r"\{{.*?\}}", content, re.DOTALL)
|
||||
|
||||
if json_match:
|
||||
try:
|
||||
data = json.loads(json_match.group())
|
||||
@@ -364,35 +338,31 @@ class KnowledgeReasoner:
|
||||
confidence=data.get("confidence", 0.7),
|
||||
evidence=[{"text": e} for e in data.get("evidence", [])],
|
||||
related_entities=[],
|
||||
gaps=data.get("knowledge_gaps", [])
|
||||
gaps=data.get("knowledge_gaps", []),
|
||||
)
|
||||
except:
|
||||
except BaseException:
|
||||
pass
|
||||
|
||||
|
||||
return ReasoningResult(
|
||||
answer=content,
|
||||
reasoning_type=ReasoningType.ASSOCIATIVE,
|
||||
confidence=0.5,
|
||||
evidence=[],
|
||||
related_entities=[],
|
||||
gaps=[]
|
||||
gaps=[],
|
||||
)
|
||||
|
||||
|
||||
def find_inference_paths(
|
||||
self,
|
||||
start_entity: str,
|
||||
end_entity: str,
|
||||
graph_data: Dict,
|
||||
max_depth: int = 3
|
||||
self, start_entity: str, end_entity: str, graph_data: Dict, max_depth: int = 3
|
||||
) -> List[InferencePath]:
|
||||
"""
|
||||
发现两个实体之间的推理路径
|
||||
|
||||
|
||||
使用 BFS 在关系图中搜索路径
|
||||
"""
|
||||
entities = {e["id"]: e for e in graph_data.get("entities", [])}
|
||||
relations = graph_data.get("relations", [])
|
||||
|
||||
|
||||
# 构建邻接表
|
||||
adj = {}
|
||||
for r in relations:
|
||||
@@ -405,51 +375,56 @@ class KnowledgeReasoner:
|
||||
adj[src].append({"target": tgt, "relation": r.get("type", "related"), "data": r})
|
||||
# 无向图也添加反向
|
||||
adj[tgt].append({"target": src, "relation": r.get("type", "related"), "data": r, "reverse": True})
|
||||
|
||||
|
||||
# BFS 搜索路径
|
||||
from collections import deque
|
||||
|
||||
paths = []
|
||||
queue = deque([(start_entity, [{"entity": start_entity, "relation": None}])])
|
||||
visited = {start_entity}
|
||||
|
||||
{start_entity}
|
||||
|
||||
while queue and len(paths) < 5:
|
||||
current, path = queue.popleft()
|
||||
|
||||
|
||||
if current == end_entity and len(path) > 1:
|
||||
# 找到一条路径
|
||||
paths.append(InferencePath(
|
||||
start_entity=start_entity,
|
||||
end_entity=end_entity,
|
||||
path=path,
|
||||
strength=self._calculate_path_strength(path)
|
||||
))
|
||||
paths.append(
|
||||
InferencePath(
|
||||
start_entity=start_entity,
|
||||
end_entity=end_entity,
|
||||
path=path,
|
||||
strength=self._calculate_path_strength(path),
|
||||
)
|
||||
)
|
||||
continue
|
||||
|
||||
|
||||
if len(path) >= max_depth:
|
||||
continue
|
||||
|
||||
|
||||
for neighbor in adj.get(current, []):
|
||||
next_entity = neighbor["target"]
|
||||
if next_entity not in [p["entity"] for p in path]: # 避免循环
|
||||
new_path = path + [{
|
||||
"entity": next_entity,
|
||||
"relation": neighbor["relation"],
|
||||
"relation_data": neighbor.get("data", {})
|
||||
}]
|
||||
new_path = path + [
|
||||
{
|
||||
"entity": next_entity,
|
||||
"relation": neighbor["relation"],
|
||||
"relation_data": neighbor.get("data", {}),
|
||||
}
|
||||
]
|
||||
queue.append((next_entity, new_path))
|
||||
|
||||
|
||||
# 按强度排序
|
||||
paths.sort(key=lambda p: p.strength, reverse=True)
|
||||
return paths
|
||||
|
||||
|
||||
def _calculate_path_strength(self, path: List[Dict]) -> float:
|
||||
"""计算路径强度"""
|
||||
if len(path) < 2:
|
||||
return 0.0
|
||||
|
||||
|
||||
# 路径越短越强
|
||||
length_factor = 1.0 / len(path)
|
||||
|
||||
|
||||
# 关系置信度
|
||||
confidence_sum = 0
|
||||
confidence_count = 0
|
||||
@@ -458,20 +433,17 @@ class KnowledgeReasoner:
|
||||
if "confidence" in rel_data:
|
||||
confidence_sum += rel_data["confidence"]
|
||||
confidence_count += 1
|
||||
|
||||
|
||||
confidence_factor = (confidence_sum / confidence_count) if confidence_count > 0 else 0.5
|
||||
|
||||
|
||||
return length_factor * confidence_factor
|
||||
|
||||
|
||||
async def summarize_project(
|
||||
self,
|
||||
project_context: Dict,
|
||||
graph_data: Dict,
|
||||
summary_type: str = "comprehensive"
|
||||
self, project_context: Dict, graph_data: Dict, summary_type: str = "comprehensive"
|
||||
) -> Dict:
|
||||
"""
|
||||
项目智能总结
|
||||
|
||||
|
||||
Args:
|
||||
summary_type: comprehensive/executive/technical/risk
|
||||
"""
|
||||
@@ -479,9 +451,9 @@ class KnowledgeReasoner:
|
||||
"comprehensive": "全面总结项目的所有方面",
|
||||
"executive": "高管摘要,关注关键决策和风险",
|
||||
"technical": "技术总结,关注架构和技术栈",
|
||||
"risk": "风险分析,关注潜在问题和依赖"
|
||||
"risk": "风险分析,关注潜在问题和依赖",
|
||||
}
|
||||
|
||||
|
||||
prompt = f"""请对以下项目进行{type_prompts.get(summary_type, "全面总结")}:
|
||||
|
||||
## 项目信息
|
||||
@@ -500,25 +472,26 @@ class KnowledgeReasoner:
|
||||
"recommendations": ["建议1"],
|
||||
"confidence": 0.85
|
||||
}}"""
|
||||
|
||||
|
||||
content = await self._call_llm(prompt, temperature=0.3)
|
||||
|
||||
|
||||
import re
|
||||
json_match = re.search(r'\{{.*?\}}', content, re.DOTALL)
|
||||
|
||||
|
||||
json_match = re.search(r"\{{.*?\}}", content, re.DOTALL)
|
||||
|
||||
if json_match:
|
||||
try:
|
||||
return json.loads(json_match.group())
|
||||
except:
|
||||
except BaseException:
|
||||
pass
|
||||
|
||||
|
||||
return {
|
||||
"overview": content,
|
||||
"key_points": [],
|
||||
"key_entities": [],
|
||||
"risks": [],
|
||||
"recommendations": [],
|
||||
"confidence": 0.5
|
||||
"confidence": 0.5,
|
||||
}
|
||||
|
||||
|
||||
def get_knowledge_reasoner() -> KnowledgeReasoner:
    """Return the process-wide KnowledgeReasoner singleton, creating it lazily."""
    global _reasoner
    if _reasoner is None:
        _reasoner = KnowledgeReasoner()
    return _reasoner
|
||||
|
||||
Reference in New Issue
Block a user