- Add collaboration_manager import and COLLABORATION_AVAILABLE flag
- Add get_collab_manager() singleton function
- Add collaboration API endpoints:
  - Project share links (create, list, verify, access, revoke)
  - Comments (add, get, update, resolve, delete)
  - Change history (get, stats, versions, revert)
  - Team members (invite, list, update role, remove, check permissions)
- Add collaboration tables to schema.sql:
  - project_shares, comments, change_history, team_members
  - Related indexes for performance
7543 lines
243 KiB
Python
7543 lines
243 KiB
Python
#!/usr/bin/env python3
|
||
"""
|
||
InsightFlow Backend - Phase 6 (API Platform)
|
||
API 开放平台:API Key 管理、Swagger 文档、限流
|
||
Knowledge Growth: Multi-file fusion + Entity Alignment + Document Import
|
||
ASR: 阿里云听悟 + OSS
|
||
"""
|
||
|
||
import os
|
||
import sys
|
||
import json
|
||
import hashlib
|
||
import secrets
|
||
import httpx
|
||
import uuid
|
||
import re
|
||
import io
|
||
import time
|
||
from fastapi import FastAPI, File, UploadFile, HTTPException, Form, Depends, Header, Request
|
||
from fastapi.middleware.cors import CORSMiddleware
|
||
from fastapi.staticfiles import StaticFiles
|
||
from fastapi.responses import JSONResponse
|
||
from pydantic import BaseModel, Field
|
||
from typing import List, Optional, Union, Dict
|
||
from datetime import datetime
|
||
|
||
# Add backend directory to path for imports
|
||
backend_dir = os.path.dirname(os.path.abspath(__file__))
|
||
if backend_dir not in sys.path:
|
||
sys.path.insert(0, backend_dir)
|
||
|
||
# Import clients
|
||
try:
|
||
from oss_uploader import get_oss_uploader
|
||
OSS_AVAILABLE = True
|
||
except ImportError:
|
||
OSS_AVAILABLE = False
|
||
|
||
try:
|
||
from tingwu_client import TingwuClient
|
||
TINGWU_AVAILABLE = True
|
||
except ImportError:
|
||
TINGWU_AVAILABLE = False
|
||
|
||
try:
|
||
from db_manager import get_db_manager, Project, Entity, EntityMention
|
||
DB_AVAILABLE = True
|
||
except ImportError as e:
|
||
print(f"DB import error: {e}")
|
||
DB_AVAILABLE = False
|
||
|
||
try:
|
||
from document_processor import DocumentProcessor
|
||
DOC_PROCESSOR_AVAILABLE = True
|
||
except ImportError:
|
||
DOC_PROCESSOR_AVAILABLE = False
|
||
|
||
try:
|
||
from entity_aligner import EntityAligner
|
||
ALIGNER_AVAILABLE = True
|
||
except ImportError:
|
||
ALIGNER_AVAILABLE = False
|
||
|
||
try:
|
||
from llm_client import get_llm_client, ChatMessage
|
||
LLM_CLIENT_AVAILABLE = True
|
||
except ImportError:
|
||
LLM_CLIENT_AVAILABLE = False
|
||
|
||
try:
|
||
from knowledge_reasoner import get_knowledge_reasoner, KnowledgeReasoner, ReasoningType
|
||
REASONER_AVAILABLE = True
|
||
except ImportError:
|
||
REASONER_AVAILABLE = False
|
||
|
||
try:
|
||
from export_manager import get_export_manager, ExportEntity, ExportRelation, ExportTranscript
|
||
EXPORT_AVAILABLE = True
|
||
except ImportError:
|
||
EXPORT_AVAILABLE = False
|
||
|
||
try:
|
||
from neo4j_manager import get_neo4j_manager, sync_project_to_neo4j, NEO4J_AVAILABLE
|
||
except ImportError:
|
||
NEO4J_AVAILABLE = False
|
||
|
||
# Phase 6: API Key Manager
|
||
try:
|
||
from api_key_manager import get_api_key_manager, ApiKeyManager, ApiKey
|
||
API_KEY_AVAILABLE = True
|
||
except ImportError as e:
|
||
print(f"API Key Manager import error: {e}")
|
||
API_KEY_AVAILABLE = False
|
||
|
||
# Phase 6: Rate Limiter
|
||
try:
|
||
from rate_limiter import get_rate_limiter, RateLimitConfig, RateLimitInfo
|
||
RATE_LIMITER_AVAILABLE = True
|
||
except ImportError as e:
|
||
print(f"Rate Limiter import error: {e}")
|
||
RATE_LIMITER_AVAILABLE = False
|
||
|
||
# Phase 7: Workflow Manager
|
||
try:
|
||
from workflow_manager import (
|
||
get_workflow_manager, WorkflowManager, Workflow, WorkflowTask,
|
||
WebhookConfig, WorkflowLog, WorkflowType, WebhookType, TaskStatus
|
||
)
|
||
WORKFLOW_AVAILABLE = True
|
||
except ImportError as e:
|
||
print(f"Workflow Manager import error: {e}")
|
||
WORKFLOW_AVAILABLE = False
|
||
|
||
# Phase 7: Multimodal Support
|
||
try:
|
||
from multimodal_processor import (
|
||
get_multimodal_processor, MultimodalProcessor,
|
||
VideoProcessingResult, VideoFrame
|
||
)
|
||
MULTIMODAL_AVAILABLE = True
|
||
except ImportError as e:
|
||
print(f"Multimodal Processor import error: {e}")
|
||
MULTIMODAL_AVAILABLE = False
|
||
|
||
try:
|
||
from image_processor import (
|
||
get_image_processor, ImageProcessor,
|
||
ImageProcessingResult, ImageEntity, ImageRelation
|
||
)
|
||
IMAGE_PROCESSOR_AVAILABLE = True
|
||
except ImportError as e:
|
||
print(f"Image Processor import error: {e}")
|
||
IMAGE_PROCESSOR_AVAILABLE = False
|
||
|
||
try:
|
||
from multimodal_entity_linker import (
|
||
get_multimodal_entity_linker, MultimodalEntityLinker,
|
||
MultimodalEntity, EntityLink, AlignmentResult, FusionResult
|
||
)
|
||
MULTIMODAL_LINKER_AVAILABLE = True
|
||
except ImportError as e:
|
||
print(f"Multimodal Entity Linker import error: {e}")
|
||
MULTIMODAL_LINKER_AVAILABLE = False
|
||
|
||
# Phase 7 Task 7: Plugin Manager
|
||
try:
|
||
from plugin_manager import (
|
||
get_plugin_manager, PluginManager, Plugin,
|
||
BotSession, WebhookEndpoint, WebDAVSync,
|
||
PluginType, PluginStatus, ChromeExtensionHandler, BotHandler,
|
||
WebhookIntegration
|
||
)
|
||
PLUGIN_MANAGER_AVAILABLE = True
|
||
except ImportError as e:
|
||
print(f"Plugin Manager import error: {e}")
|
||
PLUGIN_MANAGER_AVAILABLE = False
|
||
|
||
# Phase 7 Task 3: Security Manager
|
||
try:
|
||
from security_manager import (
|
||
get_security_manager, SecurityManager,
|
||
AuditLog, EncryptionConfig, MaskingRule, DataAccessPolicy, AccessRequest,
|
||
AuditActionType, MaskingRuleType
|
||
)
|
||
SECURITY_MANAGER_AVAILABLE = True
|
||
except ImportError as e:
|
||
print(f"Security Manager import error: {e}")
|
||
SECURITY_MANAGER_AVAILABLE = False
|
||
|
||
# Phase 7 Task 4: Collaboration Manager
|
||
try:
|
||
from collaboration_manager import get_collaboration_manager, CollaborationManager
|
||
COLLABORATION_AVAILABLE = True
|
||
except ImportError as e:
|
||
print(f"Collaboration Manager import error: {e}")
|
||
COLLABORATION_AVAILABLE = False
|
||
|
||
# FastAPI app with enhanced metadata for Swagger.
# The description/contact/license/tag metadata below only affects the
# generated OpenAPI document and Swagger UI, not runtime behavior.
app = FastAPI(
    title="InsightFlow API",
    description="""
InsightFlow 知识管理平台 API

## 功能

* **项目管理** - 创建、读取、更新、删除项目
* **实体管理** - 实体提取、对齐、属性管理
* **关系管理** - 实体关系创建、查询、分析
* **转录管理** - 音频转录、文档导入
* **知识推理** - 因果推理、对比分析、时序分析
* **图分析** - Neo4j 图数据库集成、路径查询
* **导出功能** - 多种格式导出(PDF、Excel、CSV、JSON)
* **工作流** - 自动化任务、Webhook 通知

## 认证

大部分 API 需要 API Key 认证。在请求头中添加:
```
X-API-Key: your_api_key_here
```
""",
    version="0.7.0",
    contact={
        "name": "InsightFlow Team",
        "url": "https://github.com/insightflow/insightflow",
    },
    license_info={
        "name": "MIT",
        "url": "https://opensource.org/licenses/MIT",
    },
    # Tag groups drive the section layout of the generated Swagger UI.
    openapi_tags=[
        {"name": "Projects", "description": "项目管理"},
        {"name": "Entities", "description": "实体管理"},
        {"name": "Relations", "description": "关系管理"},
        {"name": "Transcripts", "description": "转录管理"},
        {"name": "Analysis", "description": "分析和推理"},
        {"name": "Graph", "description": "图分析和 Neo4j"},
        {"name": "Export", "description": "数据导出"},
        {"name": "API Keys", "description": "API 密钥管理"},
        {"name": "Workflows", "description": "工作流自动化"},
        {"name": "Webhooks", "description": "Webhook 配置"},
        {"name": "Multimodal", "description": "多模态支持(视频、图片)"},
        {"name": "Plugins", "description": "插件管理"},
        {"name": "Chrome Extension", "description": "Chrome 扩展集成"},
        {"name": "Bot", "description": "飞书/钉钉机器人"},
        {"name": "Integrations", "description": "Zapier/Make 集成"},
        {"name": "WebDAV", "description": "WebDAV 同步"},
        {"name": "Security", "description": "数据安全与合规(加密、脱敏、审计)"},
        {"name": "System", "description": "系统信息"},
    ]
)

# CORS configuration.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers per the CORS spec and is unsafe for production —
# confirm the intended set of origins before deploying.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
||
|
||
# ==================== Phase 6: API Key Authentication & Rate Limiting ====================
|
||
|
||
# Publicly accessible paths (no API key required).
# NOTE(review): these entries are consumed with startswith() in
# verify_api_key and rate_limit_middleware; the "/" entry therefore makes
# every path match — confirm the intended matching rule.
PUBLIC_PATHS = {
    "/", "/docs", "/openapi.json", "/redoc",
    "/api/v1/health", "/api/v1/status",
    "/api/v1/api-keys",  # POST: creating an API Key must not require one
}

# Admin path prefixes (require the master key).
ADMIN_PATHS = {
    "/api/v1/admin/",
}

# Master key used to manage all API Keys; empty string when unset, which
# effectively disables admin access.
MASTER_KEY = os.getenv("INSIGHTFLOW_MASTER_KEY", "")
|
||
|
||
|
||
async def verify_api_key(request: Request, x_api_key: Optional[str] = Header(None, alias="X-API-Key")):
    """
    FastAPI dependency that authenticates a request via the X-API-Key header.

    - Public paths pass through without credentials.
    - Admin paths require the master key.
    - All other paths require a valid API Key.

    Returns:
        None for public/unauthenticated-allowed requests, otherwise a dict
        describing the authenticated principal.

    Raises:
        HTTPException: 401 for missing/invalid keys, 403 for a bad master key.
    """
    path = request.url.path
    method = request.method

    # Public paths bypass authentication.
    # BUG FIX: the original used startswith() against every entry; since "/"
    # is in PUBLIC_PATHS, every path matched and authentication was disabled
    # entirely. Match "/" exactly and use prefix matching (with a path
    # separator) only for the other entries.
    if path in PUBLIC_PATHS or any(
        path.startswith(p + "/") for p in PUBLIC_PATHS if p != "/"
    ):
        return None

    # Creating an API Key must not itself require a key.
    if path == "/api/v1/api-keys" and method == "POST":
        return None

    # Admin paths require the master key.
    if any(path.startswith(p) for p in ADMIN_PATHS):
        # compare_digest avoids leaking key content through timing differences;
        # an unset MASTER_KEY disables admin access outright.
        if not MASTER_KEY or not x_api_key or not secrets.compare_digest(x_api_key, MASTER_KEY):
            raise HTTPException(
                status_code=403,
                detail="Admin access required. Provide valid master key in X-API-Key header."
            )
        return {"type": "admin", "key": x_api_key}

    # Remaining paths need a valid API Key.
    if not API_KEY_AVAILABLE:
        # Dev mode: the key-manager module is missing, so allow access.
        return None

    if not x_api_key:
        raise HTTPException(
            status_code=401,
            detail="API Key required. Provide your key in X-API-Key header.",
            headers={"WWW-Authenticate": "ApiKey"}
        )

    # Validate the key against the key store.
    key_manager = get_api_key_manager()
    api_key = key_manager.validate_key(x_api_key)

    if not api_key:
        raise HTTPException(
            status_code=401,
            detail="Invalid or expired API Key"
        )

    # Track usage and expose the key to downstream handlers/middleware.
    key_manager.update_last_used(api_key.id)
    request.state.api_key = api_key

    return {"type": "api_key", "key_id": api_key.id, "permissions": api_key.permissions}
|
||
|
||
|
||
async def rate_limit_middleware(request: Request, call_next):
    """
    HTTP middleware that enforces per-caller rate limits and records
    API-call logs.

    Limit buckets, most to least privileged:
      - master key:        1000 requests/minute
      - validated API key: the key's own configured limit
      - anonymous (by IP): 10 requests/minute
    """
    if not RATE_LIMITER_AVAILABLE or not API_KEY_AVAILABLE:
        return await call_next(request)

    path = request.url.path

    # Public paths are not rate limited.
    # BUG FIX: the original used startswith() against every entry; since "/"
    # is in PUBLIC_PATHS, every request matched and rate limiting never ran.
    # Match "/" exactly; prefix-match (with a path separator) the rest.
    if path in PUBLIC_PATHS or any(
        path.startswith(p + "/") for p in PUBLIC_PATHS if p != "/"
    ):
        return await call_next(request)

    limiter = get_rate_limiter()
    x_api_key = request.headers.get("X-API-Key")

    if x_api_key and MASTER_KEY and secrets.compare_digest(x_api_key, MASTER_KEY):
        # Master key gets the highest allowance.
        config = RateLimitConfig(requests_per_minute=1000)
        limit_key = f"master:{x_api_key[:16]}"
    elif hasattr(request.state, 'api_key') and request.state.api_key:
        # Use the key's own configured limit.
        # NOTE(review): middleware runs before route dependencies, so
        # request.state.api_key is normally not yet set at this point —
        # confirm whether this branch is ever reached.
        api_key = request.state.api_key
        config = RateLimitConfig(requests_per_minute=api_key.rate_limit)
        limit_key = f"api_key:{api_key.id}"
    else:
        # Unauthenticated callers are limited per client IP.
        client_ip = request.client.host if request.client else "unknown"
        config = RateLimitConfig(requests_per_minute=10)
        limit_key = f"ip:{client_ip}"

    info = await limiter.is_allowed(limit_key, config)

    if not info.allowed:
        return JSONResponse(
            status_code=429,
            content={
                "error": "Rate limit exceeded",
                "retry_after": info.retry_after,
                "limit": config.requests_per_minute,
                "window": "minute"
            },
            headers={
                "X-RateLimit-Limit": str(config.requests_per_minute),
                "X-RateLimit-Remaining": "0",
                "X-RateLimit-Reset": str(info.reset_time),
                "Retry-After": str(info.retry_after)
            }
        )

    start_time = time.time()
    response = await call_next(request)

    # Surface the caller's remaining budget on every response.
    response.headers["X-RateLimit-Limit"] = str(config.requests_per_minute)
    response.headers["X-RateLimit-Remaining"] = str(info.remaining)
    response.headers["X-RateLimit-Reset"] = str(info.reset_time)

    # Best-effort call logging; a logging failure must not break the response.
    try:
        if hasattr(request.state, 'api_key') and request.state.api_key:
            api_key = request.state.api_key
            response_time = int((time.time() - start_time) * 1000)
            key_manager = get_api_key_manager()
            key_manager.log_api_call(
                api_key_id=api_key.id,
                endpoint=path,
                method=request.method,
                status_code=response.status_code,
                response_time_ms=response_time,
                ip_address=request.client.host if request.client else "",
                user_agent=request.headers.get("User-Agent", "")
            )
    except Exception as e:
        print(f"Failed to log API call: {e}")

    return response
|
||
|
||
|
||
# Register the rate-limiting middleware on the app.
app.middleware("http")(rate_limit_middleware)
|
||
|
||
# ==================== Phase 6: Pydantic Models for API ====================

# API-Key related request/response models
class ApiKeyCreate(BaseModel):
    """Request body for creating a new API Key."""
    name: str = Field(..., description="API Key 名称/描述")
    permissions: List[str] = Field(default=["read"], description="权限列表: read, write, delete")
    rate_limit: int = Field(default=60, description="每分钟请求限制")
    expires_days: Optional[int] = Field(default=None, description="过期天数(可选)")


class ApiKeyResponse(BaseModel):
    """Public view of an API Key (the secret itself is never included)."""
    id: str
    key_preview: str
    name: str
    permissions: List[str]
    rate_limit: int
    status: str
    created_at: str
    expires_at: Optional[str]
    last_used_at: Optional[str]
    total_calls: int


class ApiKeyCreateResponse(BaseModel):
    """Creation response: the one-time plaintext key plus its metadata."""
    api_key: str = Field(..., description="API Key(仅显示一次,请妥善保存)")
    info: ApiKeyResponse


class ApiKeyListResponse(BaseModel):
    """Paginated-style list of API Keys."""
    keys: List[ApiKeyResponse]
    total: int


class ApiKeyUpdate(BaseModel):
    """Partial update for an API Key; None fields are left unchanged."""
    name: Optional[str] = None
    permissions: Optional[List[str]] = None
    rate_limit: Optional[int] = None


class ApiCallStats(BaseModel):
    """Aggregate call statistics for one API Key."""
    total_calls: int
    success_calls: int
    error_calls: int
    avg_response_time_ms: float
    max_response_time_ms: int
    min_response_time_ms: int


class ApiStatsResponse(BaseModel):
    """Summary stats plus per-endpoint and per-day breakdowns."""
    summary: ApiCallStats
    endpoints: List[Dict]
    daily: List[Dict]


class ApiCallLog(BaseModel):
    """A single recorded API call."""
    id: int
    endpoint: str
    method: str
    status_code: int
    response_time_ms: int
    ip_address: str
    user_agent: str
    error_message: str
    created_at: str


class ApiLogsResponse(BaseModel):
    """List of API call logs."""
    logs: List[ApiCallLog]
    total: int


class RateLimitStatus(BaseModel):
    """Current rate-limit window state for a caller."""
    limit: int
    remaining: int
    reset_time: int
    window: str
|
||
|
||
|
||
# Original (pre-Phase-6) models, kept for compatibility
class EntityModel(BaseModel):
    """An extracted entity."""
    id: str
    name: str
    type: str
    definition: Optional[str] = ""
    aliases: List[str] = []


class TranscriptSegment(BaseModel):
    """One timed segment of a transcript."""
    start: float
    end: float
    text: str
    speaker: Optional[str] = "Speaker A"


class AnalysisResult(BaseModel):
    """Full result of analyzing one uploaded audio/document."""
    transcript_id: str
    project_id: str
    segments: List[TranscriptSegment]
    entities: List[EntityModel]
    full_text: str
    created_at: str


class ProjectCreate(BaseModel):
    """Request body for creating a project."""
    name: str
    description: str = ""


class EntityUpdate(BaseModel):
    """Partial entity update; None fields are left unchanged."""
    name: Optional[str] = None
    type: Optional[str] = None
    definition: Optional[str] = None
    aliases: Optional[List[str]] = None


class RelationCreate(BaseModel):
    """Request body for creating (or updating) an entity relation."""
    source_entity_id: str
    target_entity_id: str
    relation_type: str
    evidence: Optional[str] = ""


class TranscriptUpdate(BaseModel):
    """Request body for replacing a transcript's full text."""
    full_text: str


class AgentQuery(BaseModel):
    """A natural-language query for the agent."""
    query: str
    stream: bool = False


class AgentCommand(BaseModel):
    """A raw command string for the agent."""
    command: str


class EntityMergeRequest(BaseModel):
    """Request body for merging the source entity into the target."""
    source_entity_id: str
    target_entity_id: str


class GlossaryTermCreate(BaseModel):
    """Request body for adding a glossary term (for ASR hotwords)."""
    term: str
    pronunciation: Optional[str] = ""
|
||
|
||
|
||
# ==================== Phase 7: Workflow Pydantic Models ====================

class WorkflowCreate(BaseModel):
    """Request body for creating a workflow."""
    name: str = Field(..., description="工作流名称")
    description: str = Field(default="", description="工作流描述")
    workflow_type: str = Field(..., description="工作流类型: auto_analyze, auto_align, auto_relation, scheduled_report, custom")
    project_id: str = Field(..., description="所属项目ID")
    schedule: Optional[str] = Field(default=None, description="调度表达式(cron或分钟数)")
    schedule_type: str = Field(default="manual", description="调度类型: manual, cron, interval")
    config: Dict = Field(default_factory=dict, description="工作流配置")
    webhook_ids: List[str] = Field(default_factory=list, description="关联的Webhook ID列表")


class WorkflowUpdate(BaseModel):
    """Partial workflow update; None fields are left unchanged."""
    name: Optional[str] = None
    description: Optional[str] = None
    status: Optional[str] = None  # active, paused, error, completed
    schedule: Optional[str] = None
    schedule_type: Optional[str] = None
    is_active: Optional[bool] = None
    config: Optional[Dict] = None
    webhook_ids: Optional[List[str]] = None


class WorkflowResponse(BaseModel):
    """Full workflow record including run counters."""
    id: str
    name: str
    description: str
    workflow_type: str
    project_id: str
    status: str
    schedule: Optional[str]
    schedule_type: str
    config: Dict
    webhook_ids: List[str]
    is_active: bool
    created_at: str
    updated_at: str
    last_run_at: Optional[str]
    next_run_at: Optional[str]
    run_count: int
    success_count: int
    fail_count: int


class WorkflowListResponse(BaseModel):
    """List of workflows."""
    workflows: List[WorkflowResponse]
    total: int


class WorkflowTaskCreate(BaseModel):
    """Request body for adding a task to a workflow."""
    name: str = Field(..., description="任务名称")
    task_type: str = Field(..., description="任务类型: analyze, align, discover_relations, notify, custom")
    config: Dict = Field(default_factory=dict, description="任务配置")
    order: int = Field(default=0, description="执行顺序")
    depends_on: List[str] = Field(default_factory=list, description="依赖的任务ID列表")
    timeout_seconds: int = Field(default=300, description="超时时间(秒)")
    retry_count: int = Field(default=3, description="重试次数")
    retry_delay: int = Field(default=5, description="重试延迟(秒)")


class WorkflowTaskUpdate(BaseModel):
    """Partial task update; None fields are left unchanged."""
    name: Optional[str] = None
    task_type: Optional[str] = None
    config: Optional[Dict] = None
    order: Optional[int] = None
    depends_on: Optional[List[str]] = None
    timeout_seconds: Optional[int] = None
    retry_count: Optional[int] = None
    retry_delay: Optional[int] = None


class WorkflowTaskResponse(BaseModel):
    """Full workflow-task record."""
    id: str
    workflow_id: str
    name: str
    task_type: str
    config: Dict
    order: int
    depends_on: List[str]
    timeout_seconds: int
    retry_count: int
    retry_delay: int
    created_at: str
    updated_at: str


class WebhookCreate(BaseModel):
    """Request body for registering an outgoing webhook."""
    name: str = Field(..., description="Webhook名称")
    webhook_type: str = Field(..., description="Webhook类型: feishu, dingtalk, slack, custom")
    url: str = Field(..., description="Webhook URL")
    secret: str = Field(default="", description="签名密钥")
    headers: Dict = Field(default_factory=dict, description="自定义请求头")
    template: str = Field(default="", description="消息模板")


class WebhookUpdate(BaseModel):
    """Partial webhook update; None fields are left unchanged."""
    name: Optional[str] = None
    webhook_type: Optional[str] = None
    url: Optional[str] = None
    secret: Optional[str] = None
    headers: Optional[Dict] = None
    template: Optional[str] = None
    is_active: Optional[bool] = None


class WebhookResponse(BaseModel):
    """Public webhook record (the secret is not exposed)."""
    id: str
    name: str
    webhook_type: str
    url: str
    headers: Dict
    template: str
    is_active: bool
    created_at: str
    updated_at: str
    last_used_at: Optional[str]
    success_count: int
    fail_count: int


class WebhookListResponse(BaseModel):
    """List of webhooks."""
    webhooks: List[WebhookResponse]
    total: int


class WorkflowLogResponse(BaseModel):
    """One workflow/task execution log entry."""
    id: str
    workflow_id: str
    task_id: Optional[str]
    status: str
    start_time: Optional[str]
    end_time: Optional[str]
    duration_ms: int
    input_data: Dict
    output_data: Dict
    error_message: str
    created_at: str


class WorkflowLogListResponse(BaseModel):
    """List of workflow execution logs."""
    logs: List[WorkflowLogResponse]
    total: int


class WorkflowTriggerRequest(BaseModel):
    """Request body for manually triggering a workflow."""
    input_data: Dict = Field(default_factory=dict, description="工作流输入数据")


class WorkflowTriggerResponse(BaseModel):
    """Result of a manual workflow trigger."""
    success: bool
    workflow_id: str
    log_id: str
    results: Dict
    duration_ms: int


class WorkflowStatsResponse(BaseModel):
    """Aggregate workflow execution statistics."""
    total: int
    success: int
    failed: int
    success_rate: float
    avg_duration_ms: float
    daily: List[Dict]
|
||
|
||
|
||
# LLM (Kimi) API credentials and endpoint; empty key disables LLM extraction.
KIMI_API_KEY = os.getenv("KIMI_API_KEY", "")
KIMI_BASE_URL = os.getenv("KIMI_BASE_URL", "https://api.kimi.com/coding")
|
||
|
||
# Phase 3: lazily-constructed EntityAligner shared by the whole process
_aligner = None

def get_aligner():
    """Return the shared EntityAligner, or None when the module is missing."""
    global _aligner
    if ALIGNER_AVAILABLE and _aligner is None:
        _aligner = EntityAligner()
    return _aligner
|
||
|
||
# Phase 3: lazily-constructed DocumentProcessor shared by the whole process
_doc_processor = None

def get_doc_processor():
    """Return the shared DocumentProcessor, or None when the module is missing."""
    global _doc_processor
    if DOC_PROCESSOR_AVAILABLE and _doc_processor is None:
        _doc_processor = DocumentProcessor()
    return _doc_processor
|
||
|
||
# Phase 7 Task 4: lazily-constructed CollaborationManager singleton
_collaboration_manager = None

def get_collab_manager():
    """Return the shared CollaborationManager, or None when unavailable.

    The manager is handed the db manager when the DB layer is importable,
    otherwise None.
    """
    global _collaboration_manager
    if COLLABORATION_AVAILABLE and _collaboration_manager is None:
        database = get_db_manager() if DB_AVAILABLE else None
        _collaboration_manager = get_collaboration_manager(database)
    return _collaboration_manager
|
||
|
||
# Phase 2: Entity Edit API
|
||
@app.put("/api/v1/entities/{entity_id}", tags=["Entities"])
|
||
async def update_entity(entity_id: str, update: EntityUpdate, _=Depends(verify_api_key)):
|
||
"""更新实体信息(名称、类型、定义、别名)"""
|
||
if not DB_AVAILABLE:
|
||
raise HTTPException(status_code=500, detail="Database not available")
|
||
|
||
db = get_db_manager()
|
||
entity = db.get_entity(entity_id)
|
||
if not entity:
|
||
raise HTTPException(status_code=404, detail="Entity not found")
|
||
|
||
# 更新字段
|
||
update_data = {k: v for k, v in update.dict().items() if v is not None}
|
||
updated = db.update_entity(entity_id, **update_data)
|
||
|
||
return {
|
||
"id": updated.id,
|
||
"name": updated.name,
|
||
"type": updated.type,
|
||
"definition": updated.definition,
|
||
"aliases": updated.aliases
|
||
}
|
||
|
||
@app.delete("/api/v1/entities/{entity_id}", tags=["Entities"])
|
||
async def delete_entity(entity_id: str, _=Depends(verify_api_key)):
|
||
"""删除实体"""
|
||
if not DB_AVAILABLE:
|
||
raise HTTPException(status_code=500, detail="Database not available")
|
||
|
||
db = get_db_manager()
|
||
entity = db.get_entity(entity_id)
|
||
if not entity:
|
||
raise HTTPException(status_code=404, detail="Entity not found")
|
||
|
||
db.delete_entity(entity_id)
|
||
return {"success": True, "message": f"Entity {entity_id} deleted"}
|
||
|
||
@app.post("/api/v1/entities/{entity_id}/merge", tags=["Entities"])
|
||
async def merge_entities_endpoint(entity_id: str, merge_req: EntityMergeRequest, _=Depends(verify_api_key)):
|
||
"""合并两个实体"""
|
||
if not DB_AVAILABLE:
|
||
raise HTTPException(status_code=500, detail="Database not available")
|
||
|
||
db = get_db_manager()
|
||
|
||
# 验证两个实体都存在
|
||
source = db.get_entity(merge_req.source_entity_id)
|
||
target = db.get_entity(merge_req.target_entity_id)
|
||
|
||
if not source or not target:
|
||
raise HTTPException(status_code=404, detail="Entity not found")
|
||
|
||
result = db.merge_entities(merge_req.target_entity_id, merge_req.source_entity_id)
|
||
return {
|
||
"success": True,
|
||
"merged_entity": {
|
||
"id": result.id,
|
||
"name": result.name,
|
||
"type": result.type,
|
||
"definition": result.definition,
|
||
"aliases": result.aliases
|
||
}
|
||
}
|
||
|
||
# Phase 2: Relation Edit API
|
||
@app.post("/api/v1/projects/{project_id}/relations", tags=["Relations"])
|
||
async def create_relation_endpoint(project_id: str, relation: RelationCreate, _=Depends(verify_api_key)):
|
||
"""创建新的实体关系"""
|
||
if not DB_AVAILABLE:
|
||
raise HTTPException(status_code=500, detail="Database not available")
|
||
|
||
db = get_db_manager()
|
||
|
||
# 验证实体存在
|
||
source = db.get_entity(relation.source_entity_id)
|
||
target = db.get_entity(relation.target_entity_id)
|
||
|
||
if not source or not target:
|
||
raise HTTPException(status_code=404, detail="Source or target entity not found")
|
||
|
||
relation_id = db.create_relation(
|
||
project_id=project_id,
|
||
source_entity_id=relation.source_entity_id,
|
||
target_entity_id=relation.target_entity_id,
|
||
relation_type=relation.relation_type,
|
||
evidence=relation.evidence
|
||
)
|
||
|
||
return {
|
||
"id": relation_id,
|
||
"source_id": relation.source_entity_id,
|
||
"target_id": relation.target_entity_id,
|
||
"type": relation.relation_type,
|
||
"success": True
|
||
}
|
||
|
||
@app.delete("/api/v1/relations/{relation_id}", tags=["Relations"])
|
||
async def delete_relation(relation_id: str, _=Depends(verify_api_key)):
|
||
"""删除关系"""
|
||
if not DB_AVAILABLE:
|
||
raise HTTPException(status_code=500, detail="Database not available")
|
||
|
||
db = get_db_manager()
|
||
db.delete_relation(relation_id)
|
||
return {"success": True, "message": f"Relation {relation_id} deleted"}
|
||
|
||
@app.put("/api/v1/relations/{relation_id}", tags=["Relations"])
|
||
async def update_relation(relation_id: str, relation: RelationCreate, _=Depends(verify_api_key)):
|
||
"""更新关系"""
|
||
if not DB_AVAILABLE:
|
||
raise HTTPException(status_code=500, detail="Database not available")
|
||
|
||
db = get_db_manager()
|
||
updated = db.update_relation(
|
||
relation_id=relation_id,
|
||
relation_type=relation.relation_type,
|
||
evidence=relation.evidence
|
||
)
|
||
|
||
return {
|
||
"id": relation_id,
|
||
"type": updated["relation_type"],
|
||
"evidence": updated["evidence"],
|
||
"success": True
|
||
}
|
||
|
||
# Phase 2: Transcript Edit API
|
||
@app.get("/api/v1/transcripts/{transcript_id}", tags=["Transcripts"])
|
||
async def get_transcript(transcript_id: str, _=Depends(verify_api_key)):
|
||
"""获取转录详情"""
|
||
if not DB_AVAILABLE:
|
||
raise HTTPException(status_code=500, detail="Database not available")
|
||
|
||
db = get_db_manager()
|
||
transcript = db.get_transcript(transcript_id)
|
||
|
||
if not transcript:
|
||
raise HTTPException(status_code=404, detail="Transcript not found")
|
||
|
||
return transcript
|
||
|
||
@app.put("/api/v1/transcripts/{transcript_id}", tags=["Transcripts"])
|
||
async def update_transcript(transcript_id: str, update: TranscriptUpdate, _=Depends(verify_api_key)):
|
||
"""更新转录文本(人工修正)"""
|
||
if not DB_AVAILABLE:
|
||
raise HTTPException(status_code=500, detail="Database not available")
|
||
|
||
db = get_db_manager()
|
||
transcript = db.get_transcript(transcript_id)
|
||
|
||
if not transcript:
|
||
raise HTTPException(status_code=404, detail="Transcript not found")
|
||
|
||
updated = db.update_transcript(transcript_id, update.full_text)
|
||
return {
|
||
"id": transcript_id,
|
||
"full_text": updated["full_text"],
|
||
"updated_at": updated["updated_at"],
|
||
"success": True
|
||
}
|
||
|
||
# Phase 2: Manual Entity Creation
class ManualEntityCreate(BaseModel):
    """Request body for creating an entity by hand (UI text selection)."""
    name: str
    type: str = "OTHER"
    definition: str = ""
    # Optional mention location: the transcript and the character offsets of
    # the selected span within its full text.
    transcript_id: Optional[str] = None
    start_pos: Optional[int] = None
    end_pos: Optional[int] = None
|
||
|
||
@app.post("/api/v1/projects/{project_id}/entities", tags=["Entities"])
|
||
async def create_manual_entity(project_id: str, entity: ManualEntityCreate, _=Depends(verify_api_key)):
|
||
"""手动创建实体(划词新建)"""
|
||
if not DB_AVAILABLE:
|
||
raise HTTPException(status_code=500, detail="Database not available")
|
||
|
||
db = get_db_manager()
|
||
|
||
# 检查是否已存在
|
||
existing = db.get_entity_by_name(project_id, entity.name)
|
||
if existing:
|
||
return {
|
||
"id": existing.id,
|
||
"name": existing.name,
|
||
"type": existing.type,
|
||
"existed": True
|
||
}
|
||
|
||
entity_id = str(uuid.uuid4())[:8]
|
||
new_entity = db.create_entity(Entity(
|
||
id=entity_id,
|
||
project_id=project_id,
|
||
name=entity.name,
|
||
type=entity.type,
|
||
definition=entity.definition
|
||
))
|
||
|
||
# 如果有提及位置信息,保存提及
|
||
if entity.transcript_id and entity.start_pos is not None and entity.end_pos is not None:
|
||
transcript = db.get_transcript(entity.transcript_id)
|
||
if transcript:
|
||
text = transcript["full_text"]
|
||
mention = EntityMention(
|
||
id=str(uuid.uuid4())[:8],
|
||
entity_id=entity_id,
|
||
transcript_id=entity.transcript_id,
|
||
start_pos=entity.start_pos,
|
||
end_pos=entity.end_pos,
|
||
text_snippet=text[max(0, entity.start_pos-20):min(len(text), entity.end_pos+20)],
|
||
confidence=1.0
|
||
)
|
||
db.add_mention(mention)
|
||
|
||
return {
|
||
"id": new_entity.id,
|
||
"name": new_entity.name,
|
||
"type": new_entity.type,
|
||
"definition": new_entity.definition,
|
||
"success": True
|
||
}
|
||
|
||
def transcribe_audio(audio_data: bytes, filename: str) -> dict:
    """Transcribe audio: upload to OSS, then run Tingwu ASR on the URL.

    Falls back to mock_transcribe() whenever a dependency is missing or a
    step fails, so callers always receive a transcription-shaped dict.
    """
    # Step 1: upload the audio to OSS so Tingwu can fetch it by URL.
    if not OSS_AVAILABLE:
        print("OSS not available, using mock")
        return mock_transcribe()

    try:
        audio_url, object_name = get_oss_uploader().upload_audio(audio_data, filename)
        print(f"Uploaded to OSS: {object_name}")
    except Exception as e:
        print(f"OSS upload failed: {e}")
        return mock_transcribe()

    # Step 2: run the Tingwu transcription against the uploaded URL.
    if not TINGWU_AVAILABLE:
        print("Tingwu not available, using mock")
        return mock_transcribe()

    try:
        result = TingwuClient().transcribe(audio_url)
        print(f"Transcription complete: {len(result['segments'])} segments")
        return result
    except Exception as e:
        print(f"Tingwu failed: {e}")
        return mock_transcribe()
|
||
|
||
def mock_transcribe() -> dict:
    """Return a canned transcription payload used when ASR is unavailable."""
    sample_text = "我们今天讨论 Project Alpha 的进度,K8s 集群已经部署完成。"
    segment = {
        "start": 0.0,
        "end": 5.0,
        "text": sample_text,
        "speaker": "Speaker A",
    }
    return {"full_text": sample_text, "segments": [segment]}
|
||
|
||
def extract_entities_with_llm(text: str) -> tuple[List[dict], List[dict]]:
    """Extract entities and relations from meeting text via the Kimi API.

    Args:
        text: Raw transcript text; only the first 3000 characters are sent.

    Returns:
        (entities, relations): lists of plain dicts as produced by the LLM,
        or ([], []) when the key/text is missing or any step fails.
    """
    if not KIMI_API_KEY or not text:
        return [], []

    prompt = f"""从以下会议文本中提取关键实体和它们之间的关系,以 JSON 格式返回:

文本:{text[:3000]}

要求:
1. entities: 每个实体包含 name(名称), type(类型: PROJECT/TECH/PERSON/ORG/OTHER), definition(一句话定义)
2. relations: 每个关系包含 source(源实体名), target(目标实体名), type(关系类型: belongs_to/works_with/depends_on/mentions/related)
3. 只返回 JSON 对象,格式: {{"entities": [...], "relations": [...]}}

示例:
{{
"entities": [
{{"name": "Project Alpha", "type": "PROJECT", "definition": "核心项目"}},
{{"name": "K8s", "type": "TECH", "definition": "Kubernetes容器编排平台"}}
],
"relations": [
{{"source": "Project Alpha", "target": "K8s", "type": "depends_on"}}
]
}}
"""

    try:
        response = httpx.post(
            f"{KIMI_BASE_URL}/v1/chat/completions",
            headers={"Authorization": f"Bearer {KIMI_API_KEY}", "Content-Type": "application/json"},
            json={"model": "k2p5", "messages": [{"role": "user", "content": prompt}], "temperature": 0.1},
            timeout=60.0
        )
        response.raise_for_status()
        result = response.json()
        content = result["choices"][0]["message"]["content"]

        # BUG FIX: the original pattern r'\{{.*?\}}' only matched text wrapped
        # in doubled braces ("{{...}}"), which the model never emits, so
        # extraction always returned ([], []). Match greedily from the first
        # "{" to the last "}" (DOTALL spans newlines) so a nested JSON object
        # is captured whole rather than truncated at the first "}".
        json_match = re.search(r'\{.*\}', content, re.DOTALL)
        if json_match:
            data = json.loads(json_match.group())
            return data.get("entities", []), data.get("relations", [])
    except Exception as e:
        print(f"LLM extraction failed: {e}")

    return [], []
|
||
|
||
def align_entity(project_id: str, name: str, db, definition: str = "") -> Optional['Entity']:
    """Resolve *name* to an existing entity in the project, or None.

    Resolution order (Phase 3): exact name match, embedding-based similarity
    when the aligner is available, then the DB's simple string-similarity
    fallback.
    """
    # 1. Exact name match wins outright.
    exact = db.get_entity_by_name(project_id, name)
    if exact is not None:
        return exact

    # 2. Embedding-based alignment, when an aligner can be obtained.
    aligner = get_aligner()
    if aligner:
        candidate = aligner.find_similar_entity(project_id, name, definition)
        if candidate:
            return candidate

    # 3. Fall back to the cheap DB-side similarity lookup.
    fallback = db.find_similar_entities(project_id, name)
    return fallback[0] if fallback else None
|
||
|
||
# API Endpoints
|
||
|
||
@app.post("/api/v1/projects", response_model=dict, tags=["Projects"])
async def create_project(project: ProjectCreate, _=Depends(verify_api_key)):
    """Create a new project and return its id, name and description."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    # Short random id, matching the id style used for other records.
    new_id = str(uuid.uuid4())[:8]
    created = get_db_manager().create_project(new_id, project.name, project.description)
    return {
        "id": created.id,
        "name": created.name,
        "description": created.description,
    }
|
||
|
||
@app.get("/api/v1/projects", tags=["Projects"])
async def list_projects(_=Depends(verify_api_key)):
    """List all projects as {id, name, description} dicts (empty if DB is down)."""
    if not DB_AVAILABLE:
        return []

    rows = []
    for p in get_db_manager().list_projects():
        rows.append({"id": p.id, "name": p.name, "description": p.description})
    return rows
|
||
|
||
@app.post("/api/v1/projects/{project_id}/upload", response_model=AnalysisResult, tags=["Projects"])
async def upload_audio(project_id: str, file: UploadFile = File(...), _=Depends(verify_api_key)):
    """Upload an audio file to a project and run the full ingest pipeline.

    Pipeline: transcribe -> LLM entity/relation extraction -> entity
    alignment (Phase 3, multi-file fusion) -> persist transcript, mentions
    and relations.

    Raises:
        HTTPException: 500 when the DB layer is unavailable, 404 for an
        unknown project id.
    """
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    content = await file.read()

    # Transcribe (transcribe_audio falls back to a mock result when Tingwu
    # is unavailable or fails).
    print(f"Processing: {file.filename}")
    tw_result = transcribe_audio(content, file.filename)

    # Extract entities and relations with the LLM (best-effort; may be empty).
    print("Extracting entities and relations...")
    raw_entities, raw_relations = extract_entities_with_llm(tw_result["full_text"])

    # Persist the transcript first so mentions/relations can reference it.
    transcript_id = str(uuid.uuid4())[:8]
    db.save_transcript(
        transcript_id=transcript_id,
        project_id=project_id,
        filename=file.filename,
        full_text=tw_result["full_text"]
    )

    # Align each raw entity against the existing project graph (Phase 3:
    # embedding-based alignment); create a new entity only on a miss.
    aligned_entities = []
    entity_name_to_id = {}  # raw entity name -> persisted entity id, for relation mapping

    for raw_ent in raw_entities:
        existing = align_entity(project_id, raw_ent["name"], db, raw_ent.get("definition", ""))

        if existing:
            ent_model = EntityModel(
                id=existing.id,
                name=existing.name,
                type=existing.type,
                definition=existing.definition,
                aliases=existing.aliases
            )
            entity_name_to_id[raw_ent["name"]] = existing.id
        else:
            new_ent = db.create_entity(Entity(
                id=str(uuid.uuid4())[:8],
                project_id=project_id,
                name=raw_ent["name"],
                type=raw_ent.get("type", "OTHER"),
                definition=raw_ent.get("definition", "")
            ))
            ent_model = EntityModel(
                id=new_ent.id,
                name=new_ent.name,
                type=new_ent.type,
                definition=new_ent.definition
            )
            entity_name_to_id[raw_ent["name"]] = new_ent.id

        aligned_entities.append(ent_model)

        # Record every occurrence of the entity name in the transcript as a
        # mention with ~20 chars of surrounding context. start_pos advances
        # by 1, so overlapping occurrences are also captured.
        full_text = tw_result["full_text"]
        name = raw_ent["name"]
        start_pos = 0
        while True:
            pos = full_text.find(name, start_pos)
            if pos == -1:
                break
            mention = EntityMention(
                id=str(uuid.uuid4())[:8],
                entity_id=entity_name_to_id[name],
                transcript_id=transcript_id,
                start_pos=pos,
                end_pos=pos + len(name),
                text_snippet=full_text[max(0, pos-20):min(len(full_text), pos+len(name)+20)],
                confidence=1.0
            )
            db.add_mention(mention)
            start_pos = pos + 1

    # Persist relations, skipping any whose endpoints were not extracted above.
    for rel in raw_relations:
        source_id = entity_name_to_id.get(rel.get("source", ""))
        target_id = entity_name_to_id.get(rel.get("target", ""))
        if source_id and target_id:
            db.create_relation(
                project_id=project_id,
                source_entity_id=source_id,
                target_entity_id=target_id,
                relation_type=rel.get("type", "related"),
                evidence=tw_result["full_text"][:200],  # first 200 chars as provenance
                transcript_id=transcript_id
            )

    # Build response segments from the raw transcription output.
    segments = [TranscriptSegment(**seg) for seg in tw_result["segments"]]

    return AnalysisResult(
        transcript_id=transcript_id,
        project_id=project_id,
        segments=segments,
        entities=aligned_entities,
        full_text=tw_result["full_text"],
        created_at=datetime.now().isoformat()
    )
|
||
|
||
# Phase 3: Document Upload API
|
||
@app.post("/api/v1/projects/{project_id}/upload-document")
async def upload_document(project_id: str, file: UploadFile = File(...), _=Depends(verify_api_key)):
    """Upload a PDF/DOCX document to a project and ingest it.

    Mirrors the audio pipeline: extract text -> LLM entity/relation
    extraction -> entity alignment -> persist transcript (marked as type
    "document"), mentions and relations.

    Raises:
        HTTPException: 500 when DB or the document processor is missing,
        404 for an unknown project, 400 when document parsing fails.
    """
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    if not DOC_PROCESSOR_AVAILABLE:
        raise HTTPException(status_code=500, detail="Document processor not available")

    db = get_db_manager()
    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    content = await file.read()

    # Extract plain text from the uploaded document bytes.
    processor = get_doc_processor()
    try:
        result = processor.process(content, file.filename)
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Document processing failed: {str(e)}")

    # Persist the transcript first; "document" distinguishes it from audio.
    transcript_id = str(uuid.uuid4())[:8]
    db.save_transcript(
        transcript_id=transcript_id,
        project_id=project_id,
        filename=file.filename,
        full_text=result["text"],
        transcript_type="document"
    )

    # LLM extraction (best-effort; may return empty lists).
    raw_entities, raw_relations = extract_entities_with_llm(result["text"])

    # Align entities against the existing project graph, creating on miss.
    aligned_entities = []
    entity_name_to_id = {}  # raw entity name -> persisted entity id

    for raw_ent in raw_entities:
        existing = align_entity(project_id, raw_ent["name"], db, raw_ent.get("definition", ""))

        if existing:
            entity_name_to_id[raw_ent["name"]] = existing.id
            aligned_entities.append(EntityModel(
                id=existing.id,
                name=existing.name,
                type=existing.type,
                definition=existing.definition,
                aliases=existing.aliases
            ))
        else:
            new_ent = db.create_entity(Entity(
                id=str(uuid.uuid4())[:8],
                project_id=project_id,
                name=raw_ent["name"],
                type=raw_ent.get("type", "OTHER"),
                definition=raw_ent.get("definition", "")
            ))
            entity_name_to_id[raw_ent["name"]] = new_ent.id
            aligned_entities.append(EntityModel(
                id=new_ent.id,
                name=new_ent.name,
                type=new_ent.type,
                definition=new_ent.definition
            ))

        # Record every occurrence of the name as a mention with ~20 chars
        # of context; start_pos advances by 1 so overlaps are captured too.
        full_text = result["text"]
        name = raw_ent["name"]
        start_pos = 0
        while True:
            pos = full_text.find(name, start_pos)
            if pos == -1:
                break
            mention = EntityMention(
                id=str(uuid.uuid4())[:8],
                entity_id=entity_name_to_id[name],
                transcript_id=transcript_id,
                start_pos=pos,
                end_pos=pos + len(name),
                text_snippet=full_text[max(0, pos-20):min(len(full_text), pos+len(name)+20)],
                confidence=1.0
            )
            db.add_mention(mention)
            start_pos = pos + 1

    # Persist relations whose endpoints were both extracted above.
    for rel in raw_relations:
        source_id = entity_name_to_id.get(rel.get("source", ""))
        target_id = entity_name_to_id.get(rel.get("target", ""))
        if source_id and target_id:
            db.create_relation(
                project_id=project_id,
                source_entity_id=source_id,
                target_entity_id=target_id,
                relation_type=rel.get("type", "related"),
                evidence=result["text"][:200],  # first 200 chars as provenance
                transcript_id=transcript_id
            )

    return {
        "transcript_id": transcript_id,
        "project_id": project_id,
        "filename": file.filename,
        "text_length": len(result["text"]),
        "entities": [e.dict() for e in aligned_entities],
        "created_at": datetime.now().isoformat()
    }
|
||
|
||
# Phase 3: Knowledge Base API
|
||
@app.get("/api/v1/projects/{project_id}/knowledge-base")
async def get_knowledge_base(project_id: str, _=Depends(verify_api_key)):
    """Return the project's complete knowledge base in one payload.

    Payload contains: project identity, aggregate stats, entities (with
    mention counts, source transcript ids and Phase-5 custom attributes),
    relations with endpoint names resolved, the glossary, and transcript
    metadata.

    Raises:
        HTTPException: 500 when the DB is unavailable, 404 for an unknown
        project id.
    """
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # Load all graph components for the project.
    entities = db.list_project_entities(project_id)

    relations = db.list_project_relations(project_id)

    transcripts = db.list_project_transcripts(project_id)

    glossary = db.list_glossary(project_id)

    # Per-entity mention statistics and (Phase 5) custom attributes.
    # NOTE(review): this issues one mention query + one attribute query per
    # entity (N+1 pattern) — fine for small projects, a candidate for
    # batching if entity counts grow.
    entity_stats = {}
    entity_attributes = {}
    for ent in entities:
        mentions = db.get_entity_mentions(ent.id)
        entity_stats[ent.id] = {
            "mention_count": len(mentions),
            "transcript_ids": list(set([m.transcript_id for m in mentions]))
        }
        # Phase 5: entity attributes
        attrs = db.get_entity_attributes(ent.id)
        entity_attributes[ent.id] = attrs

    # Entity id -> name map for resolving relation endpoints.
    entity_map = {e.id: e.name for e in entities}

    return {
        "project": {
            "id": project.id,
            "name": project.name,
            "description": project.description
        },
        "stats": {
            "entity_count": len(entities),
            "relation_count": len(relations),
            "transcript_count": len(transcripts),
            "glossary_count": len(glossary)
        },
        "entities": [
            {
                "id": e.id,
                "name": e.name,
                "type": e.type,
                "definition": e.definition,
                "aliases": e.aliases,
                "mention_count": entity_stats.get(e.id, {}).get("mention_count", 0),
                "appears_in": entity_stats.get(e.id, {}).get("transcript_ids", []),
                "attributes": entity_attributes.get(e.id, [])  # Phase 5: attributes included
            }
            for e in entities
        ],
        "relations": [
            {
                "id": r["id"],
                "source_id": r["source_entity_id"],
                "source_name": entity_map.get(r["source_entity_id"], "Unknown"),
                "target_id": r["target_entity_id"],
                "target_name": entity_map.get(r["target_entity_id"], "Unknown"),
                "type": r["relation_type"],
                "evidence": r["evidence"]
            }
            for r in relations
        ],
        "glossary": [
            {
                "id": g["id"],
                "term": g["term"],
                "pronunciation": g["pronunciation"],
                "frequency": g["frequency"]
            }
            for g in glossary
        ],
        "transcripts": [
            {
                "id": t["id"],
                "filename": t["filename"],
                "type": t.get("type", "audio"),
                "created_at": t["created_at"]
            }
            for t in transcripts
        ]
    }
|
||
|
||
# Phase 3: Glossary API
|
||
@app.post("/api/v1/projects/{project_id}/glossary")
async def add_glossary_term(project_id: str, term: GlossaryTermCreate, _=Depends(verify_api_key)):
    """Add a term (with optional pronunciation) to a project's glossary."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    if not db.get_project(project_id):
        raise HTTPException(status_code=404, detail="Project not found")

    new_id = db.add_glossary_term(
        project_id=project_id,
        term=term.term,
        pronunciation=term.pronunciation,
    )

    return {
        "id": new_id,
        "term": term.term,
        "pronunciation": term.pronunciation,
        "success": True,
    }
|
||
|
||
@app.get("/api/v1/projects/{project_id}/glossary")
async def get_glossary(project_id: str, _=Depends(verify_api_key)):
    """Return the project's glossary rows exactly as stored in the DB."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")
    return get_db_manager().list_glossary(project_id)
|
||
|
||
@app.delete("/api/v1/glossary/{term_id}")
async def delete_glossary_term(term_id: str, _=Depends(verify_api_key)):
    """Delete a glossary term by id; always reports success."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")
    get_db_manager().delete_glossary_term(term_id)
    return {"success": True}
|
||
|
||
# Phase 3: Entity Alignment API
|
||
@app.post("/api/v1/projects/{project_id}/align-entities")
async def align_project_entities(project_id: str, threshold: float = 0.85, _=Depends(verify_api_key)):
    """Run embedding-based entity alignment over a project, merging duplicates.

    Args:
        threshold: minimum similarity score for two entities to be merged.

    Returns:
        Merge report: total count plus the (source, target) name pairs merged.

    Raises:
        HTTPException: 500 when the DB or the aligner is unavailable.
    """
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    aligner = get_aligner()
    if not aligner:
        raise HTTPException(status_code=500, detail="Entity aligner not available")

    db = get_db_manager()
    entities = db.list_project_entities(project_id)

    merged_count = 0
    merged_pairs = []

    # Fixed: previously used enumerate() but never used the index.
    for entity in entities:
        # Skip entities already merged away by an earlier iteration
        # (merge_entities removes the source entity from the DB).
        if not db.get_entity(entity.id):
            continue

        similar = aligner.find_similar_entity(
            project_id,
            entity.name,
            entity.definition,
            exclude_id=entity.id,
            threshold=threshold
        )

        if similar:
            # Merge the current entity into its most similar counterpart.
            db.merge_entities(similar.id, entity.id)
            merged_count += 1
            merged_pairs.append({
                "source": entity.name,
                "target": similar.name
            })

    return {
        "success": True,
        "merged_count": merged_count,
        "merged_pairs": merged_pairs
    }
|
||
|
||
@app.get("/api/v1/projects/{project_id}/entities")
async def get_project_entities(project_id: str, _=Depends(verify_api_key)):
    """Return the project's global entity list (empty when DB is down)."""
    if not DB_AVAILABLE:
        return []

    payload = []
    for ent in get_db_manager().list_project_entities(project_id):
        payload.append({
            "id": ent.id,
            "name": ent.name,
            "type": ent.type,
            "definition": ent.definition,
            "aliases": ent.aliases,
        })
    return payload
|
||
|
||
|
||
@app.get("/api/v1/projects/{project_id}/relations")
async def get_project_relations(project_id: str, _=Depends(verify_api_key)):
    """Return the project's relations with endpoint entity names resolved."""
    if not DB_AVAILABLE:
        return []

    db = get_db_manager()
    relations = db.list_project_relations(project_id)

    # Map entity id -> display name for both endpoints of each relation.
    name_by_id = {e.id: e.name for e in db.list_project_entities(project_id)}

    resolved = []
    for rel in relations:
        resolved.append({
            "id": rel["id"],
            "source_id": rel["source_entity_id"],
            "source_name": name_by_id.get(rel["source_entity_id"], "Unknown"),
            "target_id": rel["target_entity_id"],
            "target_name": name_by_id.get(rel["target_entity_id"], "Unknown"),
            "type": rel["relation_type"],
            "evidence": rel["evidence"],
        })
    return resolved
|
||
|
||
|
||
@app.get("/api/v1/projects/{project_id}/transcripts")
async def get_project_transcripts(project_id: str, _=Depends(verify_api_key)):
    """List a project's transcripts with a 100-character text preview."""
    if not DB_AVAILABLE:
        return []

    items = []
    for t in get_db_manager().list_project_transcripts(project_id):
        text = t["full_text"]
        # Truncate long transcripts with an ellipsis for the list view.
        preview = text if len(text) <= 100 else text[:100] + "..."
        items.append({
            "id": t["id"],
            "filename": t["filename"],
            "type": t.get("type", "audio"),
            "created_at": t["created_at"],
            "preview": preview,
        })
    return items
|
||
|
||
|
||
@app.get("/api/v1/entities/{entity_id}/mentions")
async def get_entity_mentions(entity_id: str, _=Depends(verify_api_key)):
    """Return every recorded mention of an entity across all transcripts."""
    if not DB_AVAILABLE:
        return []

    mentions = get_db_manager().get_entity_mentions(entity_id)
    return [
        {
            "id": m.id,
            "transcript_id": m.transcript_id,
            "start_pos": m.start_pos,
            "end_pos": m.end_pos,
            "text_snippet": m.text_snippet,
            "confidence": m.confidence,
        }
        for m in mentions
    ]
|
||
|
||
# Health check
|
||
@app.get("/health")
async def health_check():
    """Liveness probe reporting which optional subsystems loaded.

    Each *_available flag mirrors a module-level import guard; clients use
    this endpoint to discover which features (ASR, OSS, DB, reasoning,
    plugins, ...) are active in this deployment.
    """
    return {
        "status": "ok",
        "version": "0.7.0",
        "phase": "Phase 7 - Plugin & Integration",
        "oss_available": OSS_AVAILABLE,
        "tingwu_available": TINGWU_AVAILABLE,
        "db_available": DB_AVAILABLE,
        "doc_processor_available": DOC_PROCESSOR_AVAILABLE,
        "aligner_available": ALIGNER_AVAILABLE,
        "llm_client_available": LLM_CLIENT_AVAILABLE,
        "reasoner_available": REASONER_AVAILABLE,
        "multimodal_available": MULTIMODAL_AVAILABLE,
        "image_processor_available": IMAGE_PROCESSOR_AVAILABLE,
        "multimodal_linker_available": MULTIMODAL_LINKER_AVAILABLE,
        "plugin_manager_available": PLUGIN_MANAGER_AVAILABLE
    }
|
||
|
||
|
||
# ==================== Phase 4: Agent 助手 API ====================
|
||
|
||
@app.post("/api/v1/projects/{project_id}/agent/query")
async def agent_query(project_id: str, query: AgentQuery, _=Depends(verify_api_key)):
    """Agent RAG Q&A over a project's recent transcripts.

    When query.stream is true, the answer is streamed as Server-Sent Events
    ("data: {json}" chunks terminated by "data: [DONE]"); otherwise a single
    JSON answer is returned.

    Raises:
        HTTPException: 500 when DB/LLM is unavailable, 404 for unknown project.
    """
    if not DB_AVAILABLE or not LLM_CLIENT_AVAILABLE:
        raise HTTPException(status_code=500, detail="Service not available")

    db = get_db_manager()
    llm = get_llm_client()

    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # Project-level summary used to ground the answer.
    project_context = db.get_project_summary(project_id)

    # Concatenate recent transcripts (first 1000 chars each) as RAG context.
    context_parts = []
    for t in project_context.get('recent_transcripts', []):
        context_parts.append(f"【{t['filename']}】\n{t['full_text'][:1000]}")

    context = "\n\n".join(context_parts)

    if query.stream:
        from fastapi.responses import StreamingResponse
        import json

        async def stream_response():
            # The chat turn is built inside the generator so it runs lazily
            # when the response body is first consumed.
            messages = [
                ChatMessage(role="system", content="你是一个专业的项目分析助手,擅长从会议记录中提取洞察。"),
                ChatMessage(role="user", content=f"""基于以下项目信息回答问题:

## 项目信息
{json.dumps(project_context, ensure_ascii=False, indent=2)}

## 相关上下文
{context[:4000]}

## 用户问题
{query.query}

请用中文回答,保持简洁专业。如果信息不足,请明确说明。""")
            ]

            # SSE framing: one JSON chunk per event, then a [DONE] sentinel.
            async for chunk in llm.chat_stream(messages):
                yield f"data: {json.dumps({'content': chunk})}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(stream_response(), media_type="text/event-stream")
    else:
        # Non-streaming path delegates prompt construction to the LLM client.
        answer = await llm.rag_query(query.query, context, project_context)
        return {"answer": answer, "project_id": project_id}
|
||
|
||
|
||
@app.post("/api/v1/projects/{project_id}/agent/command")
async def agent_command(project_id: str, command: AgentCommand, _=Depends(verify_api_key)):
    """Parse a natural-language instruction and execute it against the project.

    The LLM classifies the command into an intent (merge_entities,
    answer_question, edit_entity); anything else returns usage hints.
    Returns a result dict containing the intent, an explanation, and
    intent-specific fields (merged names, answer text, updated entity, ...).
    """
    if not DB_AVAILABLE or not LLM_CLIENT_AVAILABLE:
        raise HTTPException(status_code=500, detail="Service not available")

    db = get_db_manager()
    llm = get_llm_client()

    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # The project summary supplies the entity names the LLM may refer to.
    project_context = db.get_project_summary(project_id)

    # LLM parses the free-text command into {intent, params, explanation}.
    parsed = await llm.agent_command(command.command, project_context)

    intent = parsed.get("intent", "unknown")
    params = parsed.get("params", {})

    result = {"intent": intent, "explanation": parsed.get("explanation", "")}

    # Dispatch on the parsed intent.
    if intent == "merge_entities":
        # Merge all named source entities into the target entity.
        source_names = params.get("source_names", [])
        target_name = params.get("target_name", "")

        target_entity = None
        source_entities = []

        # Resolve the target by exact or substring match over top entities.
        for e in project_context.get("top_entities", []):
            if e["name"] == target_name or target_name in e["name"]:
                target_entity = db.get_entity_by_name(project_id, e["name"])
                break

        # Resolve each source the same way, excluding the target itself.
        for name in source_names:
            for e in project_context.get("top_entities", []):
                if e["name"] == name or name in e["name"]:
                    ent = db.get_entity_by_name(project_id, e["name"])
                    if ent and (not target_entity or ent.id != target_entity.id):
                        source_entities.append(ent)
                    break

        merged = []
        if target_entity:
            for source in source_entities:
                try:
                    db.merge_entities(target_entity.id, source.id)
                    merged.append(source.name)
                except Exception as e:
                    # Best-effort: one failed merge must not abort the rest.
                    print(f"Merge failed: {e}")

        result["action"] = "merge_entities"
        result["target"] = target_entity.name if target_entity else None
        result["merged"] = merged
        result["success"] = len(merged) > 0

    elif intent == "answer_question":
        # Plain Q&A: forward to the RAG pipeline (no extra context string).
        answer = await llm.rag_query(params.get("question", command.command), "", project_context)
        result["action"] = "answer"
        result["answer"] = answer

    elif intent == "edit_entity":
        # Update a single field of a named entity.
        entity_name = params.get("entity_name", "")
        field = params.get("field", "")
        value = params.get("value", "")

        entity = db.get_entity_by_name(project_id, entity_name)
        if entity:
            updated = db.update_entity(entity.id, **{field: value})
            result["action"] = "edit_entity"
            result["entity"] = {"id": updated.id, "name": updated.name} if updated else None
            result["success"] = updated is not None
        else:
            result["success"] = False
            result["error"] = "Entity not found"

    else:
        # Unknown intent: return user-facing usage hints (kept verbatim).
        result["action"] = "none"
        result["message"] = "无法理解的指令,请尝试:\n- 合并实体:把所有'客户端'合并到'App'\n- 提问:张总对项目的态度如何?\n- 编辑:修改'K8s'的定义为..."

    return result
|
||
|
||
|
||
@app.get("/api/v1/projects/{project_id}/agent/suggest")
async def agent_suggest(project_id: str, _=Depends(verify_api_key)):
    """Generate 3-5 LLM-driven analysis suggestions for a project.

    Returns:
        {"suggestions": [...]} — an empty list when the LLM reply contains
        no parseable JSON (best-effort).

    Raises:
        HTTPException: 500 when DB or the LLM client is unavailable.
    """
    if not DB_AVAILABLE or not LLM_CLIENT_AVAILABLE:
        raise HTTPException(status_code=500, detail="Service not available")

    db = get_db_manager()
    llm = get_llm_client()

    project_context = db.get_project_summary(project_id)

    # Ask the LLM for structured suggestions over the project summary.
    prompt = f"""基于以下项目数据,提供3-5条分析建议:

{json.dumps(project_context, ensure_ascii=False, indent=2)}

请提供:
1. 数据洞察发现
2. 建议的操作(如合并相似实体、补充定义等)
3. 值得关注的关键信息

返回 JSON 格式:{{"suggestions": [{{"type": "insight|action", "title": "...", "description": "..."}}]}}"""

    messages = [ChatMessage(role="user", content=prompt)]
    content = await llm.chat(messages, temperature=0.3)

    # BUG FIX: the previous pattern r'\{{.*?\}}' only matched literal double
    # braces, which valid JSON output never contains, so suggestions were
    # always dropped. Match one brace-delimited span greedily to capture the
    # whole (nested) object. Redundant local `import re` removed (re is
    # imported at file top). The bare `except: pass` was also narrowed to
    # JSONDecodeError so real bugs are no longer swallowed.
    json_match = re.search(r'\{.*\}', content, re.DOTALL)
    if json_match:
        try:
            return json.loads(json_match.group())
        except json.JSONDecodeError:
            # Malformed LLM output — fall through to the empty default.
            pass

    return {"suggestions": []}
|
||
|
||
|
||
# ==================== Phase 4: 知识溯源 API ====================
|
||
|
||
@app.get("/api/v1/relations/{relation_id}/provenance")
async def get_relation_provenance(relation_id: str, _=Depends(verify_api_key)):
    """Return provenance (evidence + originating transcript) for one relation."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    relation = get_db_manager().get_relation_with_details(relation_id)

    if not relation:
        raise HTTPException(status_code=404, detail="Relation not found")

    # Attach transcript info only when the relation records its origin.
    transcript_info = None
    if relation.get("transcript_id"):
        transcript_info = {
            "id": relation.get("transcript_id"),
            "filename": relation.get("transcript_filename"),
        }

    return {
        "relation_id": relation_id,
        "source": relation.get("source_name"),
        "target": relation.get("target_name"),
        "type": relation.get("relation_type"),
        "evidence": relation.get("evidence"),
        "transcript": transcript_info,
    }
|
||
|
||
|
||
@app.get("/api/v1/entities/{entity_id}/details")
async def get_entity_details(entity_id: str, _=Depends(verify_api_key)):
    """Return an entity together with all of its mention records, or 404."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    details = get_db_manager().get_entity_with_mentions(entity_id)
    if not details:
        raise HTTPException(status_code=404, detail="Entity not found")
    return details
|
||
|
||
|
||
@app.get("/api/v1/entities/{entity_id}/evolution")
async def get_entity_evolution(entity_id: str, _=Depends(verify_api_key)):
    """Analyze how an entity (and attitudes toward it) evolved over time."""
    if not DB_AVAILABLE or not LLM_CLIENT_AVAILABLE:
        raise HTTPException(status_code=500, detail="Service not available")

    db = get_db_manager()
    llm = get_llm_client()

    entity = db.get_entity_with_mentions(entity_id)
    if not entity:
        raise HTTPException(status_code=404, detail="Entity not found")

    mentions = entity.get("mentions", [])

    # LLM-side evolution/attitude analysis over the mention history.
    analysis = await llm.analyze_entity_evolution(entity["name"], mentions)

    # Flatten mentions into a chronological timeline for the client.
    timeline = []
    for m in mentions:
        timeline.append({
            "date": m.get("transcript_date"),
            "snippet": m.get("text_snippet"),
            "transcript_id": m.get("transcript_id"),
            "filename": m.get("filename"),
        })

    return {
        "entity_id": entity_id,
        "entity_name": entity["name"],
        "mention_count": entity.get("mention_count", 0),
        "analysis": analysis,
        "timeline": timeline,
    }
|
||
|
||
|
||
# ==================== Phase 4: 实体管理增强 API ====================
|
||
|
||
@app.get("/api/v1/projects/{project_id}/entities/search")
async def search_entities(project_id: str, q: str, _=Depends(verify_api_key)):
    """Search a project's entities by query string *q*."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    hits = get_db_manager().search_entities(project_id, q)
    return [
        {"id": e.id, "name": e.name, "type": e.type, "definition": e.definition}
        for e in hits
    ]
|
||
|
||
|
||
# ==================== Phase 5: 时间线视图 API ====================
|
||
|
||
@app.get("/api/v1/projects/{project_id}/timeline")
async def get_project_timeline(
    project_id: str,
    entity_id: Optional[str] = None,    # optionally restrict to one entity
    start_date: Optional[str] = None,   # presumably ISO date; filtering happens DB-side — confirm
    end_date: Optional[str] = None,
    _=Depends(verify_api_key)
):
    """Project timeline: entity mentions and relation events in time order.

    Raises:
        HTTPException: 500 when the DB is unavailable, 404 for an unknown
        project id.
    """
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    timeline = db.get_project_timeline(project_id, entity_id, start_date, end_date)

    return {
        "project_id": project_id,
        "events": timeline,
        "total_count": len(timeline)
    }
|
||
|
||
|
||
@app.get("/api/v1/projects/{project_id}/timeline/summary")
async def get_timeline_summary(project_id: str, _=Depends(verify_api_key)):
    """Return summary statistics for a project's timeline."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    stats = db.get_entity_timeline_summary(project_id)

    # Flatten the DB summary into the response alongside project identity.
    response = {
        "project_id": project_id,
        "project_name": project.name,
    }
    response.update(stats)
    return response
|
||
|
||
|
||
@app.get("/api/v1/entities/{entity_id}/timeline")
async def get_entity_timeline(entity_id: str, _=Depends(verify_api_key)):
    """Return the timeline of events for a single entity."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    entity = db.get_entity(entity_id)
    if not entity:
        raise HTTPException(status_code=404, detail="Entity not found")

    # Reuse the project-level timeline query, filtered to this entity.
    events = db.get_project_timeline(entity.project_id, entity_id)

    return {
        "entity_id": entity_id,
        "entity_name": entity.name,
        "entity_type": entity.type,
        "events": events,
        "total_count": len(events)
    }
|
||
|
||
|
||
# ==================== Phase 5: 知识推理与问答增强 API ====================
|
||
|
||
class ReasoningQuery(BaseModel):
    """Request body for POST /reasoning/query."""
    query: str
    reasoning_depth: str = "medium"  # shallow/medium/deep
    stream: bool = False  # reserved; the reasoning endpoint does not read it
|
||
|
||
|
||
@app.post("/api/v1/projects/{project_id}/reasoning/query")
async def reasoning_query(project_id: str, query: ReasoningQuery, _=Depends(verify_api_key)):
    """
    Enhanced Q&A backed by knowledge-graph reasoning.

    Supported reasoning types:
    - Causal: analyze causes and effects
    - Comparative: compare entities against each other
    - Temporal: analyze timelines and evolution
    - Associative: surface implicit links

    Raises:
        HTTPException: 500 when DB/reasoner is unavailable, 404 for an
        unknown project id.
    """
    if not DB_AVAILABLE or not REASONER_AVAILABLE:
        raise HTTPException(status_code=500, detail="Knowledge reasoner not available")

    db = get_db_manager()
    reasoner = get_knowledge_reasoner()

    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # Project-level summary used as textual context for the reasoner.
    project_context = db.get_project_summary(project_id)

    # Knowledge-graph snapshot (entities + relations) for graph reasoning.
    entities = db.list_project_entities(project_id)
    relations = db.list_project_relations(project_id)

    graph_data = {
        "entities": [{"id": e.id, "name": e.name, "type": e.type, "definition": e.definition} for e in entities],
        "relations": relations
    }

    # Run the enhanced QA pipeline. NOTE(review): query.stream is accepted
    # by the request model but not used here.
    result = await reasoner.enhanced_qa(
        query=query.query,
        project_context=project_context,
        graph_data=graph_data,
        reasoning_depth=query.reasoning_depth
    )

    return {
        "answer": result.answer,
        "reasoning_type": result.reasoning_type.value,
        "confidence": result.confidence,
        "evidence": result.evidence,
        "knowledge_gaps": result.gaps,
        "project_id": project_id
    }
|
||
|
||
|
||
@app.post("/api/v1/projects/{project_id}/reasoning/inference-path")
async def find_inference_path(
    project_id: str,
    start_entity: str,
    end_entity: str,
    _=Depends(verify_api_key)
):
    """
    Discover inference paths between two entities.

    Searches the project knowledge graph for paths from start_entity to
    end_entity; at most the first 5 returned paths are included in the
    response.

    Raises:
        HTTPException: 500 when DB/reasoner is unavailable, 404 for an
        unknown project id.
    """
    if not DB_AVAILABLE or not REASONER_AVAILABLE:
        raise HTTPException(status_code=500, detail="Knowledge reasoner not available")

    db = get_db_manager()
    reasoner = get_knowledge_reasoner()

    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # Build the graph snapshot the reasoner searches over.
    entities = db.list_project_entities(project_id)
    relations = db.list_project_relations(project_id)

    graph_data = {
        "entities": [{"id": e.id, "name": e.name, "type": e.type} for e in entities],
        "relations": relations
    }

    # Path search over the snapshot (synchronous reasoner call).
    paths = reasoner.find_inference_paths(start_entity, end_entity, graph_data)

    return {
        "start_entity": start_entity,
        "end_entity": end_entity,
        "paths": [
            {
                "path": path.path,
                "strength": path.strength,
                "path_description": " -> ".join([p["entity"] for p in path.path])
            }
            for path in paths[:5]  # cap the response at 5 paths
        ],
        "total_paths": len(paths)
    }
|
||
|
||
|
||
class SummaryRequest(BaseModel):
    """Request body for POST /reasoning/summary."""
    summary_type: str = "comprehensive"  # comprehensive/executive/technical/risk
|
||
|
||
|
||
@app.post("/api/v1/projects/{project_id}/reasoning/summary")
async def project_summary(project_id: str, req: SummaryRequest, _=Depends(verify_api_key)):
    """
    Intelligent project summary.

    Generates a summary with a type-specific focus:
    - comprehensive: full overview
    - executive: leadership digest
    - technical: technical summary
    - risk: risk analysis

    Raises:
        HTTPException: 500 when DB/reasoner is unavailable, 404 for an
        unknown project id.
    """
    if not DB_AVAILABLE or not REASONER_AVAILABLE:
        raise HTTPException(status_code=500, detail="Knowledge reasoner not available")

    db = get_db_manager()
    reasoner = get_knowledge_reasoner()

    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # Textual project context for the reasoner.
    project_context = db.get_project_summary(project_id)

    # Knowledge-graph snapshot (entities + relations).
    entities = db.list_project_entities(project_id)
    relations = db.list_project_relations(project_id)

    graph_data = {
        "entities": [{"id": e.id, "name": e.name, "type": e.type} for e in entities],
        "relations": relations
    }

    # Generate the summary via the reasoner.
    summary = await reasoner.summarize_project(
        project_context=project_context,
        graph_data=graph_data,
        summary_type=req.summary_type
    )

    # BUG FIX: the return dict previously spread **summary twice in a row
    # with a missing comma, which is a SyntaxError; spread it exactly once.
    return {
        "project_id": project_id,
        "summary_type": req.summary_type,
        **summary
    }
|
||
|
||
|
||
# ==================== Phase 5: 实体属性扩展 API ====================
|
||
|
||
class AttributeTemplateCreate(BaseModel):
    """Payload for creating an attribute template on a project."""
    name: str
    type: str  # text, number, date, select, multiselect, boolean
    # Choice lists for select/multiselect templates.
    options: Optional[List[str]] = None
    default_value: Optional[str] = ""
    description: Optional[str] = ""
    is_required: bool = False
    # Display ordering hint; lower values sort first.
    sort_order: int = 0
||
|
||
class AttributeTemplateUpdate(BaseModel):
    """Partial-update payload for an attribute template; None fields are left untouched."""
    name: Optional[str] = None
    type: Optional[str] = None
    options: Optional[List[str]] = None
    default_value: Optional[str] = None
    description: Optional[str] = None
    is_required: Optional[bool] = None
    sort_order: Optional[int] = None
||
|
||
class EntityAttributeSet(BaseModel):
    """Payload for setting a single attribute value on an entity."""
    name: str
    type: str
    # Value shape depends on `type`; multiselect values arrive as a list.
    value: Optional[Union[str, int, float, List[str]]] = None
    template_id: Optional[str] = None
    options: Optional[List[str]] = None
    # Free-text reason recorded into the attribute change history.
    change_reason: Optional[str] = ""
|
||
class EntityAttributeBatchSet(BaseModel):
    """Payload for setting multiple attribute values on an entity in one request."""
    attributes: List[EntityAttributeSet]
    # One shared reason recorded for every change in the batch.
    change_reason: Optional[str] = ""
||
|
||
# 属性模板管理 API
|
||
@app.post("/api/v1/projects/{project_id}/attribute-templates")
async def create_attribute_template_endpoint(project_id: str, template: AttributeTemplateCreate, _=Depends(verify_api_key)):
    """Create a new attribute template for the given project."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    from db_manager import AttributeTemplate

    store = get_db_manager()
    if not store.get_project(project_id):
        raise HTTPException(status_code=404, detail="Project not found")

    # Short random id, consistent with the rest of the file's id scheme.
    record = AttributeTemplate(
        id=str(uuid.uuid4())[:8],
        project_id=project_id,
        name=template.name,
        type=template.type,
        options=template.options or [],
        default_value=template.default_value or "",
        description=template.description or "",
        is_required=template.is_required,
        sort_order=template.sort_order,
    )
    store.create_attribute_template(record)

    return {"id": record.id, "name": record.name, "type": record.type, "success": True}
||
|
||
@app.get("/api/v1/projects/{project_id}/attribute-templates")
async def list_attribute_templates_endpoint(project_id: str, _=Depends(verify_api_key)):
    """List every attribute template defined on the project."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    store = get_db_manager()

    def serialize(tpl):
        # Flatten a template row into the public JSON shape.
        return {
            "id": tpl.id,
            "name": tpl.name,
            "type": tpl.type,
            "options": tpl.options,
            "default_value": tpl.default_value,
            "description": tpl.description,
            "is_required": tpl.is_required,
            "sort_order": tpl.sort_order,
        }

    return [serialize(tpl) for tpl in store.list_attribute_templates(project_id)]
||
|
||
@app.get("/api/v1/attribute-templates/{template_id}")
async def get_attribute_template_endpoint(template_id: str, _=Depends(verify_api_key)):
    """Fetch one attribute template by id."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    tpl = get_db_manager().get_attribute_template(template_id)
    if tpl is None:
        raise HTTPException(status_code=404, detail="Template not found")

    return {
        "id": tpl.id,
        "name": tpl.name,
        "type": tpl.type,
        "options": tpl.options,
        "default_value": tpl.default_value,
        "description": tpl.description,
        "is_required": tpl.is_required,
        "sort_order": tpl.sort_order,
    }
||
|
||
@app.put("/api/v1/attribute-templates/{template_id}")
async def update_attribute_template_endpoint(template_id: str, update: AttributeTemplateUpdate, _=Depends(verify_api_key)):
    """Partially update an attribute template; only non-None fields are applied."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    store = get_db_manager()
    if store.get_attribute_template(template_id) is None:
        raise HTTPException(status_code=404, detail="Template not found")

    # None means "field not supplied" for this partial-update model.
    changes = {field: val for field, val in update.dict().items() if val is not None}
    result = store.update_attribute_template(template_id, **changes)

    return {"id": result.id, "name": result.name, "type": result.type, "success": True}
||
|
||
@app.delete("/api/v1/attribute-templates/{template_id}")
async def delete_attribute_template_endpoint(template_id: str, _=Depends(verify_api_key)):
    """Delete an attribute template by id."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    get_db_manager().delete_attribute_template(template_id)
    return {"success": True, "message": f"Template {template_id} deleted"}
||
|
||
# 实体属性值管理 API
|
||
@app.post("/api/v1/entities/{entity_id}/attributes")
async def set_entity_attribute_endpoint(entity_id: str, attr: EntityAttributeSet, _=Depends(verify_api_key)):
    """Create or update one attribute value on an entity.

    Every change (create and update) is recorded in `attribute_history`
    so attribute edits are auditable.
    """
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    entity = db.get_entity(entity_id)
    if not entity:
        raise HTTPException(status_code=404, detail="Entity not found")

    # Validate attribute type.
    # NOTE(review): AttributeTemplateCreate advertises a 'boolean' type that is
    # not accepted here — confirm whether that omission is intentional.
    valid_types = ['text', 'number', 'date', 'select', 'multiselect']
    if attr.type not in valid_types:
        raise HTTPException(status_code=400, detail=f"Invalid type. Must be one of: {valid_types}")

    # Normalize the value for storage: multiselect lists are persisted as
    # JSON, everything else as its string representation.
    value = attr.value
    if attr.type == 'multiselect' and isinstance(value, list):
        value = json.dumps(value)
    elif value is not None:
        value = str(value)

    # Options are persisted as JSON as well.
    options = attr.options
    if options:
        options = json.dumps(options)

    conn = db.get_conn()
    # BUG FIX: the original leaked the connection whenever a statement raised
    # (close was only reached on success); guard with try/finally.
    try:
        existing = conn.execute(
            "SELECT * FROM entity_attributes WHERE entity_id = ? AND name = ?",
            (entity_id, attr.name)
        ).fetchone()

        now = datetime.now().isoformat()

        if existing:
            # Audit trail: record old -> new value before overwriting.
            conn.execute(
                """INSERT INTO attribute_history
                (id, entity_id, attribute_name, old_value, new_value, changed_by, changed_at, change_reason)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
                (str(uuid.uuid4())[:8], entity_id, attr.name, existing['value'], value,
                 "user", now, attr.change_reason or "")
            )

            # Update the stored attribute in place.
            conn.execute(
                """UPDATE entity_attributes
                SET value = ?, type = ?, options = ?, updated_at = ?
                WHERE id = ?""",
                (value, attr.type, options, now, existing['id'])
            )
            attr_id = existing['id']
        else:
            # First write for this (entity, name): insert the attribute row.
            attr_id = str(uuid.uuid4())[:8]
            conn.execute(
                """INSERT INTO entity_attributes
                (id, entity_id, template_id, name, type, value, options, created_at, updated_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
                (attr_id, entity_id, attr.template_id, attr.name, attr.type, value, options, now, now)
            )

            # Audit trail for the creation (old_value is NULL).
            conn.execute(
                """INSERT INTO attribute_history
                (id, entity_id, attribute_name, old_value, new_value, changed_by, changed_at, change_reason)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
                (str(uuid.uuid4())[:8], entity_id, attr.name, None, value,
                 "user", now, attr.change_reason or "创建属性")
            )

        conn.commit()
    finally:
        conn.close()

    return {
        "id": attr_id,
        "entity_id": entity_id,
        "name": attr.name,
        "type": attr.type,
        "value": attr.value,
        "success": True
    }
||
|
||
@app.post("/api/v1/entities/{entity_id}/attributes/batch")
async def batch_set_entity_attributes_endpoint(entity_id: str, batch: EntityAttributeBatchSet, _=Depends(verify_api_key)):
    """Apply a batch of attribute values to an entity; unknown templates are skipped."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    from db_manager import EntityAttribute

    store = get_db_manager()
    if not store.get_entity(entity_id):
        raise HTTPException(status_code=404, detail="Entity not found")

    applied = []
    for item in batch.attributes:
        tpl = store.get_attribute_template(item.template_id)
        if not tpl:
            # Silently skip entries whose template no longer exists.
            continue
        store.set_entity_attribute(
            EntityAttribute(
                id=str(uuid.uuid4())[:8],
                entity_id=entity_id,
                template_id=item.template_id,
                value=item.value,
            ),
            changed_by="user",
            change_reason=batch.change_reason or "批量更新",
        )
        applied.append({
            "template_id": item.template_id,
            "template_name": tpl.name,
            "value": item.value,
        })

    return {
        "entity_id": entity_id,
        "updated_count": len(applied),
        "attributes": applied,
        "success": True
    }
|
||
@app.get("/api/v1/entities/{entity_id}/attributes")
async def get_entity_attributes_endpoint(entity_id: str, _=Depends(verify_api_key)):
    """Return every attribute value attached to an entity."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    store = get_db_manager()
    if not store.get_entity(entity_id):
        raise HTTPException(status_code=404, detail="Entity not found")

    return [
        {
            "id": row.id,
            "template_id": row.template_id,
            "template_name": row.template_name,
            "template_type": row.template_type,
            "value": row.value,
        }
        for row in store.get_entity_attributes(entity_id)
    ]
|
||
@app.delete("/api/v1/entities/{entity_id}/attributes/{template_id}")
async def delete_entity_attribute_endpoint(entity_id: str, template_id: str,
                                           reason: Optional[str] = "", _=Depends(verify_api_key)):
    """Remove one attribute value from an entity, recording the reason in history."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    get_db_manager().delete_entity_attribute(
        entity_id, template_id, changed_by="user", change_reason=reason
    )
    return {"success": True, "message": "Attribute deleted"}
|
||
# 属性历史 API
|
||
@app.get("/api/v1/entities/{entity_id}/attributes/history")
async def get_entity_attribute_history_endpoint(entity_id: str, limit: int = 50, _=Depends(verify_api_key)):
    """Return the attribute change history for one entity (most recent first per DB layer)."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    records = get_db_manager().get_attribute_history(entity_id=entity_id, limit=limit)
    return [
        {
            "id": rec.id,
            "attribute_name": rec.attribute_name,
            "old_value": rec.old_value,
            "new_value": rec.new_value,
            "changed_by": rec.changed_by,
            "changed_at": rec.changed_at,
            "change_reason": rec.change_reason,
        }
        for rec in records
    ]
|
||
@app.get("/api/v1/attribute-templates/{template_id}/history")
async def get_template_history_endpoint(template_id: str, limit: int = 50, _=Depends(verify_api_key)):
    """Return the change history of one attribute template across all entities."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    records = get_db_manager().get_attribute_history(template_id=template_id, limit=limit)
    return [
        {
            "id": rec.id,
            "entity_id": rec.entity_id,
            "template_name": rec.template_name,
            "old_value": rec.old_value,
            "new_value": rec.new_value,
            "changed_by": rec.changed_by,
            "changed_at": rec.changed_at,
            "change_reason": rec.change_reason,
        }
        for rec in records
    ]
|
||
# 属性筛选搜索 API
|
||
@app.get("/api/v1/projects/{project_id}/entities/search-by-attributes")
async def search_entities_by_attributes_endpoint(
    project_id: str,
    attribute_filter: Optional[str] = None,  # JSON, e.g. {"职位": "经理", "部门": "技术部"}
    _=Depends(verify_api_key)
):
    """Search a project's entities by attribute name/value filters."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    store = get_db_manager()
    if not store.get_project(project_id):
        raise HTTPException(status_code=404, detail="Project not found")

    # Parse the optional JSON filter; a missing filter means "match all".
    filters = {}
    if attribute_filter:
        try:
            filters = json.loads(attribute_filter)
        except json.JSONDecodeError:
            raise HTTPException(status_code=400, detail="Invalid attribute_filter JSON")

    return [
        {
            "id": ent.id,
            "name": ent.name,
            "type": ent.type,
            "definition": ent.definition,
            "attributes": ent.attributes,
        }
        for ent in store.search_entities_by_attributes(project_id, filters)
    ]
|
||
# ==================== 导出功能 API ====================
|
||
|
||
from fastapi.responses import StreamingResponse, FileResponse
|
||
|
||
@app.get("/api/v1/projects/{project_id}/export/graph-svg")
async def export_graph_svg_endpoint(project_id: str, _=Depends(verify_api_key)):
    """Export the project's knowledge graph as an SVG image."""
    if not DB_AVAILABLE or not EXPORT_AVAILABLE:
        raise HTTPException(status_code=500, detail="Export functionality not available")

    db = get_db_manager()
    if not db.get_project(project_id):
        raise HTTPException(status_code=404, detail="Project not found")

    def to_export_entity(e):
        # Attach the entity's custom attributes keyed by template name.
        attrs = db.get_entity_attributes(e.id)
        return ExportEntity(
            id=e.id,
            name=e.name,
            type=e.type,
            definition=e.definition or "",
            aliases=json.loads(e.aliases) if e.aliases else [],
            mention_count=e.mention_count,
            attributes={a.template_name: a.value for a in attrs},
        )

    def to_export_relation(r):
        return ExportRelation(
            id=r.id,
            source=r.source_name,
            target=r.target_name,
            relation_type=r.relation_type,
            confidence=r.confidence,
            evidence=r.evidence or "",
        )

    entities = [to_export_entity(e) for e in db.get_project_entities(project_id)]
    relations = [to_export_relation(r) for r in db.get_project_relations(project_id)]

    svg_content = get_export_manager().export_knowledge_graph_svg(project_id, entities, relations)

    return StreamingResponse(
        io.BytesIO(svg_content.encode('utf-8')),
        media_type="image/svg+xml",
        headers={"Content-Disposition": f"attachment; filename=insightflow-graph-{project_id}.svg"}
    )
|
||
@app.get("/api/v1/projects/{project_id}/export/graph-png")
async def export_graph_png_endpoint(project_id: str, _=Depends(verify_api_key)):
    """Export the project's knowledge graph as a PNG image."""
    if not DB_AVAILABLE or not EXPORT_AVAILABLE:
        raise HTTPException(status_code=500, detail="Export functionality not available")

    db = get_db_manager()
    if not db.get_project(project_id):
        raise HTTPException(status_code=404, detail="Project not found")

    def to_export_entity(e):
        # Attach the entity's custom attributes keyed by template name.
        attrs = db.get_entity_attributes(e.id)
        return ExportEntity(
            id=e.id,
            name=e.name,
            type=e.type,
            definition=e.definition or "",
            aliases=json.loads(e.aliases) if e.aliases else [],
            mention_count=e.mention_count,
            attributes={a.template_name: a.value for a in attrs},
        )

    def to_export_relation(r):
        return ExportRelation(
            id=r.id,
            source=r.source_name,
            target=r.target_name,
            relation_type=r.relation_type,
            confidence=r.confidence,
            evidence=r.evidence or "",
        )

    entities = [to_export_entity(e) for e in db.get_project_entities(project_id)]
    relations = [to_export_relation(r) for r in db.get_project_relations(project_id)]

    png_bytes = get_export_manager().export_knowledge_graph_png(project_id, entities, relations)

    return StreamingResponse(
        io.BytesIO(png_bytes),
        media_type="image/png",
        headers={"Content-Disposition": f"attachment; filename=insightflow-graph-{project_id}.png"}
    )
|
||
@app.get("/api/v1/projects/{project_id}/export/entities-excel")
async def export_entities_excel_endpoint(project_id: str, _=Depends(verify_api_key)):
    """Export the project's entities as an Excel workbook."""
    if not DB_AVAILABLE or not EXPORT_AVAILABLE:
        raise HTTPException(status_code=500, detail="Export functionality not available")

    db = get_db_manager()
    if not db.get_project(project_id):
        raise HTTPException(status_code=404, detail="Project not found")

    def to_export_entity(e):
        # Attach the entity's custom attributes keyed by template name.
        attrs = db.get_entity_attributes(e.id)
        return ExportEntity(
            id=e.id,
            name=e.name,
            type=e.type,
            definition=e.definition or "",
            aliases=json.loads(e.aliases) if e.aliases else [],
            mention_count=e.mention_count,
            attributes={a.template_name: a.value for a in attrs},
        )

    entities = [to_export_entity(e) for e in db.get_project_entities(project_id)]

    excel_bytes = get_export_manager().export_entities_excel(entities)

    return StreamingResponse(
        io.BytesIO(excel_bytes),
        media_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        headers={"Content-Disposition": f"attachment; filename=insightflow-entities-{project_id}.xlsx"}
    )
|
||
@app.get("/api/v1/projects/{project_id}/export/entities-csv")
async def export_entities_csv_endpoint(project_id: str, _=Depends(verify_api_key)):
    """Export the project's entities as CSV."""
    if not DB_AVAILABLE or not EXPORT_AVAILABLE:
        raise HTTPException(status_code=500, detail="Export functionality not available")

    db = get_db_manager()
    if not db.get_project(project_id):
        raise HTTPException(status_code=404, detail="Project not found")

    def to_export_entity(e):
        # Attach the entity's custom attributes keyed by template name.
        attrs = db.get_entity_attributes(e.id)
        return ExportEntity(
            id=e.id,
            name=e.name,
            type=e.type,
            definition=e.definition or "",
            aliases=json.loads(e.aliases) if e.aliases else [],
            mention_count=e.mention_count,
            attributes={a.template_name: a.value for a in attrs},
        )

    entities = [to_export_entity(e) for e in db.get_project_entities(project_id)]

    csv_content = get_export_manager().export_entities_csv(entities)

    return StreamingResponse(
        io.BytesIO(csv_content.encode('utf-8')),
        media_type="text/csv",
        headers={"Content-Disposition": f"attachment; filename=insightflow-entities-{project_id}.csv"}
    )
|
||
@app.get("/api/v1/projects/{project_id}/export/relations-csv")
async def export_relations_csv_endpoint(project_id: str, _=Depends(verify_api_key)):
    """Export the project's relations as CSV."""
    if not DB_AVAILABLE or not EXPORT_AVAILABLE:
        raise HTTPException(status_code=500, detail="Export functionality not available")

    db = get_db_manager()
    if not db.get_project(project_id):
        raise HTTPException(status_code=404, detail="Project not found")

    relations = [
        ExportRelation(
            id=r.id,
            source=r.source_name,
            target=r.target_name,
            relation_type=r.relation_type,
            confidence=r.confidence,
            evidence=r.evidence or "",
        )
        for r in db.get_project_relations(project_id)
    ]

    csv_content = get_export_manager().export_relations_csv(relations)

    return StreamingResponse(
        io.BytesIO(csv_content.encode('utf-8')),
        media_type="text/csv",
        headers={"Content-Disposition": f"attachment; filename=insightflow-relations-{project_id}.csv"}
    )
|
||
@app.get("/api/v1/projects/{project_id}/export/report-pdf")
async def export_report_pdf_endpoint(project_id: str, _=Depends(verify_api_key)):
    """Export a full project report (entities, relations, transcripts, summary) as PDF.

    The AI-generated summary is best-effort: report generation proceeds with an
    empty summary if the reasoner fails.
    """
    if not DB_AVAILABLE or not EXPORT_AVAILABLE:
        raise HTTPException(status_code=500, detail="Export functionality not available")

    db = get_db_manager()
    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # Gather project data.
    entities_data = db.get_project_entities(project_id)
    relations_data = db.get_project_relations(project_id)
    transcripts_data = db.get_project_transcripts(project_id)

    # Convert to the export layer's data shapes.
    entities = []
    for e in entities_data:
        attrs = db.get_entity_attributes(e.id)
        entities.append(ExportEntity(
            id=e.id,
            name=e.name,
            type=e.type,
            definition=e.definition or "",
            aliases=json.loads(e.aliases) if e.aliases else [],
            mention_count=e.mention_count,
            attributes={a.template_name: a.value for a in attrs}
        ))

    relations = []
    for r in relations_data:
        relations.append(ExportRelation(
            id=r.id,
            source=r.source_name,
            target=r.target_name,
            relation_type=r.relation_type,
            confidence=r.confidence,
            evidence=r.evidence or ""
        ))

    transcripts = []
    for t in transcripts_data:
        segments = json.loads(t.segments) if t.segments else []
        transcripts.append(ExportTranscript(
            id=t.id,
            name=t.name,
            type=t.type,
            content=t.full_text or "",
            segments=segments,
            entity_mentions=[]
        ))

    # Best-effort project summary for the report header.
    summary = ""
    if REASONER_AVAILABLE:
        try:
            reasoner = get_knowledge_reasoner()
            summary_result = reasoner.generate_project_summary(project_id, db)
            summary = summary_result.get("summary", "")
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; narrow to Exception and keep the
            # deliberate best-effort behavior (empty summary on failure).
            pass

    export_mgr = get_export_manager()
    pdf_bytes = export_mgr.export_project_report_pdf(
        project_id, project.name, entities, relations, transcripts, summary
    )

    return StreamingResponse(
        io.BytesIO(pdf_bytes),
        media_type="application/pdf",
        headers={"Content-Disposition": f"attachment; filename=insightflow-report-{project_id}.pdf"}
    )
|
||
@app.get("/api/v1/projects/{project_id}/export/project-json")
async def export_project_json_endpoint(project_id: str, _=Depends(verify_api_key)):
    """Export the complete project (entities, relations, transcripts) as JSON."""
    if not DB_AVAILABLE or not EXPORT_AVAILABLE:
        raise HTTPException(status_code=500, detail="Export functionality not available")

    db = get_db_manager()
    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    def to_export_entity(e):
        # Attach the entity's custom attributes keyed by template name.
        attrs = db.get_entity_attributes(e.id)
        return ExportEntity(
            id=e.id,
            name=e.name,
            type=e.type,
            definition=e.definition or "",
            aliases=json.loads(e.aliases) if e.aliases else [],
            mention_count=e.mention_count,
            attributes={a.template_name: a.value for a in attrs},
        )

    def to_export_relation(r):
        return ExportRelation(
            id=r.id,
            source=r.source_name,
            target=r.target_name,
            relation_type=r.relation_type,
            confidence=r.confidence,
            evidence=r.evidence or "",
        )

    def to_export_transcript(t):
        # Segments are stored as a JSON string in the DB row.
        return ExportTranscript(
            id=t.id,
            name=t.name,
            type=t.type,
            content=t.full_text or "",
            segments=json.loads(t.segments) if t.segments else [],
            entity_mentions=[],
        )

    entities = [to_export_entity(e) for e in db.get_project_entities(project_id)]
    relations = [to_export_relation(r) for r in db.get_project_relations(project_id)]
    transcripts = [to_export_transcript(t) for t in db.get_project_transcripts(project_id)]

    json_content = get_export_manager().export_project_json(
        project_id, project.name, entities, relations, transcripts
    )

    return StreamingResponse(
        io.BytesIO(json_content.encode('utf-8')),
        media_type="application/json",
        headers={"Content-Disposition": f"attachment; filename=insightflow-project-{project_id}.json"}
    )
|
||
@app.get("/api/v1/transcripts/{transcript_id}/export/markdown")
async def export_transcript_markdown_endpoint(transcript_id: str, _=Depends(verify_api_key)):
    """Export a single transcript as Markdown, with entity mentions annotated."""
    if not DB_AVAILABLE or not EXPORT_AVAILABLE:
        raise HTTPException(status_code=500, detail="Export functionality not available")

    db = get_db_manager()
    transcript = db.get_transcript(transcript_id)
    if not transcript:
        raise HTTPException(status_code=404, detail="Transcript not found")

    # Entity mentions inside this transcript.
    mentions = db.get_transcript_entity_mentions(transcript_id)

    # Map of entity id -> export entity, used by the exporter to resolve mentions.
    entities_map = {}
    for e in db.get_project_entities(transcript.project_id):
        entities_map[e.id] = ExportEntity(
            id=e.id,
            name=e.name,
            type=e.type,
            definition=e.definition or "",
            aliases=json.loads(e.aliases) if e.aliases else [],
            mention_count=e.mention_count,
            attributes={},
        )

    mention_dicts = [
        {
            "entity_id": m.entity_id,
            "entity_name": m.entity_name,
            "position": m.position,
            "context": m.context,
        }
        for m in mentions
    ]

    export_transcript = ExportTranscript(
        id=transcript.id,
        name=transcript.name,
        type=transcript.type,
        content=transcript.full_text or "",
        segments=json.loads(transcript.segments) if transcript.segments else [],
        entity_mentions=mention_dicts,
    )

    markdown_content = get_export_manager().export_transcript_markdown(export_transcript, entities_map)

    return StreamingResponse(
        io.BytesIO(markdown_content.encode('utf-8')),
        media_type="text/markdown",
        headers={"Content-Disposition": f"attachment; filename=insightflow-transcript-{transcript_id}.md"}
    )
|
||
# ==================== Neo4j Graph Database API ====================
|
||
|
||
class Neo4jSyncRequest(BaseModel):
    """Request body for syncing one project into Neo4j."""
    project_id: str
||
class PathQueryRequest(BaseModel):
    """Request body for path queries between two entities in the graph."""
    source_entity_id: str
    target_entity_id: str
    # Maximum traversal depth for the path search.
    max_depth: int = 10
||
class GraphQueryRequest(BaseModel):
    """Request body for subgraph extraction around a set of entities."""
    entity_ids: List[str]
    # Number of hops to expand around the seed entities.
    depth: int = 1
||
@app.get("/api/v1/neo4j/status")
async def neo4j_status(_=Depends(verify_api_key)):
    """Report availability and connection state of the Neo4j backend."""
    if not NEO4J_AVAILABLE:
        return {
            "available": False,
            "connected": False,
            "message": "Neo4j driver not installed"
        }

    try:
        mgr = get_neo4j_manager()
        is_up = mgr.is_connected()
    except Exception as exc:
        # Driver is present but the manager could not be queried.
        return {"available": True, "connected": False, "message": str(exc)}

    return {
        "available": True,
        "connected": is_up,
        "uri": mgr.uri if is_up else None,
        "message": "Connected" if is_up else "Not connected"
    }
|
||
@app.post("/api/v1/neo4j/sync")
async def neo4j_sync_project(request: Neo4jSyncRequest, _=Depends(verify_api_key)):
    """Push a project's entities and relations into Neo4j."""
    if not NEO4J_AVAILABLE:
        raise HTTPException(status_code=503, detail="Neo4j not available")

    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    manager = get_neo4j_manager()
    if not manager.is_connected():
        raise HTTPException(status_code=503, detail="Neo4j not connected")

    db = get_db_manager()
    project = db.get_project(request.project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # Serialize all project entities into plain dicts for the graph layer.
    entities_data = [
        {
            "id": e.id,
            "name": e.name,
            "type": e.type,
            "definition": e.definition,
            "aliases": json.loads(e.aliases) if e.aliases else [],
            "properties": e.attributes if hasattr(e, 'attributes') else {}
        }
        for e in db.get_project_entities(request.project_id)
    ]

    # Serialize all project relations likewise.
    relations_data = [
        {
            "id": r.id,
            "source_entity_id": r.source_entity_id,
            "target_entity_id": r.target_entity_id,
            "relation_type": r.relation_type,
            "evidence": r.evidence,
            "properties": {}
        }
        for r in db.get_project_relations(request.project_id)
    ]

    # Hand off to the Neo4j sync helper.
    sync_project_to_neo4j(
        project_id=request.project_id,
        project_name=project.name,
        entities=entities_data,
        relations=relations_data
    )

    return {
        "success": True,
        "project_id": request.project_id,
        "entities_synced": len(entities_data),
        "relations_synced": len(relations_data),
        "message": f"Synced {len(entities_data)} entities and {len(relations_data)} relations to Neo4j"
    }
|
||
@app.get("/api/v1/projects/{project_id}/graph/stats")
async def get_graph_stats(project_id: str, _=Depends(verify_api_key)):
    """Return node/relationship statistics for the project's graph in Neo4j."""
    if not NEO4J_AVAILABLE:
        raise HTTPException(status_code=503, detail="Neo4j not available")

    mgr = get_neo4j_manager()
    if not mgr.is_connected():
        raise HTTPException(status_code=503, detail="Neo4j not connected")

    return mgr.get_graph_stats(project_id)
|
||
@app.post("/api/v1/graph/shortest-path")
async def find_shortest_path(request: PathQueryRequest, _=Depends(verify_api_key)):
    """Find the shortest path between two entities in the Neo4j graph."""
    if not NEO4J_AVAILABLE:
        raise HTTPException(status_code=503, detail="Neo4j not available")

    mgr = get_neo4j_manager()
    if not mgr.is_connected():
        raise HTTPException(status_code=503, detail="Neo4j not connected")

    path = mgr.find_shortest_path(
        request.source_entity_id,
        request.target_entity_id,
        request.max_depth
    )

    if not path:
        return {"found": False, "message": "No path found between entities"}

    return {
        "found": True,
        "path": {
            "nodes": path.nodes,
            "relationships": path.relationships,
            "length": path.length
        }
    }
|
||
@app.post("/api/v1/graph/paths")
async def find_all_paths(request: PathQueryRequest, _=Depends(verify_api_key)):
    """Find all paths between two entities in the Neo4j graph."""
    if not NEO4J_AVAILABLE:
        raise HTTPException(status_code=503, detail="Neo4j not available")

    mgr = get_neo4j_manager()
    if not mgr.is_connected():
        raise HTTPException(status_code=503, detail="Neo4j not connected")

    found = mgr.find_all_paths(
        request.source_entity_id,
        request.target_entity_id,
        request.max_depth
    )

    serialized = [
        {"nodes": p.nodes, "relationships": p.relationships, "length": p.length}
        for p in found
    ]
    return {"count": len(serialized), "paths": serialized}
|
||
@app.get("/api/v1/entities/{entity_id}/neighbors")
async def get_entity_neighbors(
    entity_id: str,
    relation_type: Optional[str] = None,  # FIX: implicit-Optional `str = None` is invalid typing
    limit: int = 50,
    _=Depends(verify_api_key)
):
    """Return the neighbor nodes of an entity, optionally filtered by relation type.

    Query params: relation_type (optional filter), limit (max neighbors returned).
    """
    if not NEO4J_AVAILABLE:
        raise HTTPException(status_code=503, detail="Neo4j not available")

    manager = get_neo4j_manager()
    if not manager.is_connected():
        raise HTTPException(status_code=503, detail="Neo4j not connected")

    neighbors = manager.find_neighbors(entity_id, relation_type, limit)
    return {
        "entity_id": entity_id,
        "count": len(neighbors),
        "neighbors": neighbors
    }
|
||
@app.get("/api/v1/entities/{entity_id1}/common-neighbors/{entity_id2}")
async def get_common_neighbors(entity_id1: str, entity_id2: str, _=Depends(verify_api_key)):
    """Return the nodes adjacent to both of the given entities."""
    if not NEO4J_AVAILABLE:
        raise HTTPException(status_code=503, detail="Neo4j not available")

    mgr = get_neo4j_manager()
    if not mgr.is_connected():
        raise HTTPException(status_code=503, detail="Neo4j not connected")

    shared = mgr.find_common_neighbors(entity_id1, entity_id2)
    return {
        "entity_id1": entity_id1,
        "entity_id2": entity_id2,
        "count": len(shared),
        "common_neighbors": shared
    }
|
||
@app.get("/api/v1/projects/{project_id}/graph/centrality")
async def get_centrality_analysis(
    project_id: str,
    metric: str = "degree",
    _=Depends(verify_api_key)
):
    """Rank a project's entities by a centrality metric (default: degree)."""
    if not NEO4J_AVAILABLE:
        raise HTTPException(status_code=503, detail="Neo4j not available")

    graph = get_neo4j_manager()
    if not graph.is_connected():
        raise HTTPException(status_code=503, detail="Neo4j not connected")

    ranked = graph.find_central_entities(project_id, metric)
    payload = [
        {
            "entity_id": item.entity_id,
            "entity_name": item.entity_name,
            "score": item.score,
            "rank": item.rank,
        }
        for item in ranked
    ]
    return {"metric": metric, "count": len(payload), "rankings": payload}
|
||
|
||
@app.get("/api/v1/projects/{project_id}/graph/communities")
async def get_communities(project_id: str, _=Depends(verify_api_key)):
    """Run community detection for a project and return the detected groups."""
    if not NEO4J_AVAILABLE:
        raise HTTPException(status_code=503, detail="Neo4j not available")

    graph = get_neo4j_manager()
    if not graph.is_connected():
        raise HTTPException(status_code=503, detail="Neo4j not connected")

    detected = graph.detect_communities(project_id)
    payload = [
        {
            "community_id": group.community_id,
            "size": group.size,
            "density": group.density,
            "nodes": group.nodes,
        }
        for group in detected
    ]
    return {"count": len(payload), "communities": payload}
|
||
|
||
@app.post("/api/v1/graph/subgraph")
async def get_subgraph(request: GraphQueryRequest, _=Depends(verify_api_key)):
    """Return the subgraph spanned by the requested entities up to the given depth."""
    if not NEO4J_AVAILABLE:
        raise HTTPException(status_code=503, detail="Neo4j not available")

    graph = get_neo4j_manager()
    if not graph.is_connected():
        raise HTTPException(status_code=503, detail="Neo4j not connected")

    return graph.get_subgraph(request.entity_ids, request.depth)
|
||
|
||
|
||
# ==================== Phase 6: API Key Management Endpoints ====================
|
||
|
||
@app.post("/api/v1/api-keys", response_model=ApiKeyCreateResponse, tags=["API Keys"])
async def create_api_key(request: ApiKeyCreate, _=Depends(verify_api_key)):
    """Create a new API key.

    - **name**: display name / description for the key
    - **permissions**: list drawn from: read, write, delete
    - **rate_limit**: requests allowed per minute (default 60)
    - **expires_days**: optional lifetime in days (omit for no expiry)

    The raw key is only returned once, in this response.
    """
    if not API_KEY_AVAILABLE:
        raise HTTPException(status_code=503, detail="API Key management not available")

    raw_key, record = get_api_key_manager().create_key(
        name=request.name,
        permissions=request.permissions,
        rate_limit=request.rate_limit,
        expires_days=request.expires_days,
    )

    # The response model mirrors these attributes of the stored record one-to-one.
    fields = ("id", "key_preview", "name", "permissions", "rate_limit", "status",
              "created_at", "expires_at", "last_used_at", "total_calls")
    return ApiKeyCreateResponse(
        api_key=raw_key,
        info=ApiKeyResponse(**{f: getattr(record, f) for f in fields}),
    )
|
||
|
||
|
||
@app.get("/api/v1/api-keys", response_model=ApiKeyListResponse, tags=["API Keys"])
async def list_api_keys(
    status: Optional[str] = None,
    limit: int = 100,
    offset: int = 0,
    _=Depends(verify_api_key)
):
    """List API keys.

    - **status**: optional status filter (active, revoked, expired)
    - **limit**: page size
    - **offset**: page offset
    """
    if not API_KEY_AVAILABLE:
        raise HTTPException(status_code=503, detail="API Key management not available")

    records = get_api_key_manager().list_keys(status=status, limit=limit, offset=offset)

    fields = ("id", "key_preview", "name", "permissions", "rate_limit", "status",
              "created_at", "expires_at", "last_used_at", "total_calls")
    return ApiKeyListResponse(
        keys=[ApiKeyResponse(**{f: getattr(rec, f) for f in fields}) for rec in records],
        total=len(records),
    )
|
||
|
||
|
||
@app.get("/api/v1/api-keys/{key_id}", response_model=ApiKeyResponse, tags=["API Keys"])
async def get_api_key(key_id: str, _=Depends(verify_api_key)):
    """Fetch a single API key's metadata (never the raw key itself)."""
    if not API_KEY_AVAILABLE:
        raise HTTPException(status_code=503, detail="API Key management not available")

    record = get_api_key_manager().get_key_by_id(key_id)
    if record is None:
        raise HTTPException(status_code=404, detail="API Key not found")

    fields = ("id", "key_preview", "name", "permissions", "rate_limit", "status",
              "created_at", "expires_at", "last_used_at", "total_calls")
    return ApiKeyResponse(**{f: getattr(record, f) for f in fields})
|
||
|
||
|
||
@app.patch("/api/v1/api-keys/{key_id}", response_model=ApiKeyResponse, tags=["API Keys"])
async def update_api_key(key_id: str, request: ApiKeyUpdate, _=Depends(verify_api_key)):
    """Update an API key.

    Editable fields: name, permissions, rate_limit.
    """
    if not API_KEY_AVAILABLE:
        raise HTTPException(status_code=503, detail="API Key management not available")

    key_manager = get_api_key_manager()

    # Forward only the fields the client actually supplied.
    updates = {
        field: value
        for field, value in (
            ("name", request.name),
            ("permissions", request.permissions),
            ("rate_limit", request.rate_limit),
        )
        if value is not None
    }
    if not updates:
        raise HTTPException(status_code=400, detail="No fields to update")

    if not key_manager.update_key(key_id, **updates):
        raise HTTPException(status_code=404, detail="API Key not found")

    # Re-read so the response reflects the persisted state.
    refreshed = key_manager.get_key_by_id(key_id)
    fields = ("id", "key_preview", "name", "permissions", "rate_limit", "status",
              "created_at", "expires_at", "last_used_at", "total_calls")
    return ApiKeyResponse(**{f: getattr(refreshed, f) for f in fields})
|
||
|
||
|
||
@app.delete("/api/v1/api-keys/{key_id}", tags=["API Keys"])
async def revoke_api_key(key_id: str, reason: str = "", _=Depends(verify_api_key)):
    """Revoke an API key.

    A revoked key can no longer be used; its record is kept for auditing.
    """
    if not API_KEY_AVAILABLE:
        raise HTTPException(status_code=503, detail="API Key management not available")

    revoked = get_api_key_manager().revoke_key(key_id, reason=reason)
    if not revoked:
        raise HTTPException(status_code=404, detail="API Key not found or already revoked")

    return {"success": True, "message": f"API Key {key_id} revoked"}
|
||
|
||
|
||
@app.get("/api/v1/api-keys/{key_id}/stats", response_model=ApiStatsResponse, tags=["API Keys"])
async def get_api_key_stats(key_id: str, days: int = 30, _=Depends(verify_api_key)):
    """Return call statistics for an API key.

    - **days**: number of days to aggregate over (default 30)
    """
    if not API_KEY_AVAILABLE:
        raise HTTPException(status_code=503, detail="API Key management not available")

    key_manager = get_api_key_manager()

    # 404 before computing stats for a nonexistent key.
    if key_manager.get_key_by_id(key_id) is None:
        raise HTTPException(status_code=404, detail="API Key not found")

    stats = key_manager.get_call_stats(key_id, days=days)
    return ApiStatsResponse(
        summary=ApiCallStats(**stats["summary"]),
        endpoints=stats["endpoints"],
        daily=stats["daily"],
    )
|
||
|
||
|
||
@app.get("/api/v1/api-keys/{key_id}/logs", response_model=ApiLogsResponse, tags=["API Keys"])
async def get_api_key_logs(
    key_id: str,
    limit: int = 100,
    offset: int = 0,
    _=Depends(verify_api_key)
):
    """Return call logs for an API key.

    - **limit**: page size
    - **offset**: page offset
    """
    if not API_KEY_AVAILABLE:
        raise HTTPException(status_code=503, detail="API Key management not available")

    key_manager = get_api_key_manager()

    # 404 before fetching logs for a nonexistent key.
    if key_manager.get_key_by_id(key_id) is None:
        raise HTTPException(status_code=404, detail="API Key not found")

    entries = key_manager.get_call_logs(key_id, limit=limit, offset=offset)

    # Log rows are plain dicts; copy the exact keys the response model expects.
    log_keys = ("id", "endpoint", "method", "status_code", "response_time_ms",
                "ip_address", "user_agent", "error_message", "created_at")
    return ApiLogsResponse(
        logs=[ApiCallLog(**{k: entry[k] for k in log_keys}) for entry in entries],
        total=len(entries),
    )
|
||
|
||
|
||
@app.get("/api/v1/rate-limit/status", response_model=RateLimitStatus, tags=["API Keys"])
async def get_rate_limit_status(request: Request, _=Depends(verify_api_key)):
    """Report the caller's current rate-limit window (per API key or per IP)."""
    if not RATE_LIMITER_AVAILABLE:
        # No limiter configured: report the default window as fully available.
        return RateLimitStatus(
            limit=60,
            remaining=60,
            reset_time=int(time.time()) + 60,
            window="minute",
        )

    limiter = get_rate_limiter()

    # Keyed requests get their own bucket and configured limit;
    # anonymous callers share a stricter per-IP bucket.
    api_key = getattr(request.state, "api_key", None)
    if api_key:
        bucket = f"api_key:{api_key.id}"
        allowance = api_key.rate_limit
    else:
        client_ip = request.client.host if request.client else "unknown"
        bucket = f"ip:{client_ip}"
        allowance = 10

    info = await limiter.get_limit_info(bucket)
    return RateLimitStatus(
        limit=allowance,
        remaining=info.remaining,
        reset_time=info.reset_time,
        window="minute",
    )
|
||
|
||
|
||
# ==================== Phase 6: System Endpoints ====================
|
||
|
||
@app.get("/api/v1/health", tags=["System"])
async def health_check():
    """Liveness probe: static status plus the current server time."""
    now = datetime.now().isoformat()
    return {"status": "healthy", "version": "0.7.0", "timestamp": now}
|
||
|
||
|
||
@app.get("/api/v1/status", tags=["System"])
async def system_status():
    """Report version, enabled feature flags, and documentation URLs."""
    # One flag per optional subsystem, from the import-time availability checks.
    feature_flags = {
        "database": DB_AVAILABLE,
        "oss": OSS_AVAILABLE,
        "tingwu": TINGWU_AVAILABLE,
        "llm": LLM_CLIENT_AVAILABLE,
        "neo4j": NEO4J_AVAILABLE,
        "export": EXPORT_AVAILABLE,
        "api_keys": API_KEY_AVAILABLE,
        "rate_limiting": RATE_LIMITER_AVAILABLE,
        "workflow": WORKFLOW_AVAILABLE,
        "multimodal": MULTIMODAL_AVAILABLE,
        "multimodal_linker": MULTIMODAL_LINKER_AVAILABLE,
        "plugin_manager": PLUGIN_MANAGER_AVAILABLE,
    }
    return {
        "version": "0.7.0",
        "phase": "Phase 7 - Plugin & Integration",
        "features": feature_flags,
        "api": {
            "documentation": "/docs",
            "openapi": "/openapi.json",
        },
        "timestamp": datetime.now().isoformat(),
    }
|
||
|
||
|
||
# ==================== Phase 7: Workflow Automation Endpoints ====================
|
||
|
||
# Workflow Manager singleton

_workflow_manager = None


def get_workflow_manager_instance():
    """Lazily build and start the process-wide WorkflowManager.

    Returns ``None`` when workflow support or the database is unavailable.
    NOTE(review): not lock-protected — assumes single-threaded first call.
    """
    global _workflow_manager
    if _workflow_manager is not None:
        return _workflow_manager
    if WORKFLOW_AVAILABLE and DB_AVAILABLE:
        from workflow_manager import WorkflowManager
        _workflow_manager = WorkflowManager(get_db_manager())
        _workflow_manager.start()  # begin the scheduler loop immediately
    return _workflow_manager
|
||
|
||
|
||
@app.post("/api/v1/workflows", response_model=WorkflowResponse, tags=["Workflows"])
async def create_workflow_endpoint(request: WorkflowCreate, _=Depends(verify_api_key)):
    """
    Create a workflow.

    Workflow types:
    - **auto_analyze**: automatically analyze newly uploaded files
    - **auto_align**: automatic entity alignment
    - **auto_relation**: automatic relation discovery
    - **scheduled_report**: scheduled reports
    - **custom**: custom workflow

    Schedule types:
    - **manual**: triggered by hand
    - **cron**: cron-expression schedule (e.g. `0 9 * * *` — every day at 9am)
    - **interval**: interval schedule in minutes (e.g. `60` — every 60 minutes)
    """
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    manager = get_workflow_manager_instance()

    try:
        candidate = Workflow(
            id=str(uuid.uuid4())[:8],
            name=request.name,
            description=request.description,
            workflow_type=request.workflow_type,
            project_id=request.project_id,
            schedule=request.schedule,
            schedule_type=request.schedule_type,
            config=request.config,
            webhook_ids=request.webhook_ids,
        )
        created = manager.create_workflow(candidate)
    except ValueError as e:
        # Validation failures from the manager surface as 400s.
        raise HTTPException(status_code=400, detail=str(e))

    fields = ("id", "name", "description", "workflow_type", "project_id", "status",
              "schedule", "schedule_type", "config", "webhook_ids", "is_active",
              "created_at", "updated_at", "last_run_at", "next_run_at",
              "run_count", "success_count", "fail_count")
    return WorkflowResponse(**{f: getattr(created, f) for f in fields})
|
||
|
||
|
||
@app.get("/api/v1/workflows", response_model=WorkflowListResponse, tags=["Workflows"])
async def list_workflows_endpoint(
    project_id: Optional[str] = None,
    status: Optional[str] = None,
    workflow_type: Optional[str] = None,
    _=Depends(verify_api_key)
):
    """List workflows, optionally filtered by project, status and/or type."""
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    manager = get_workflow_manager_instance()
    matched = manager.list_workflows(project_id, status, workflow_type)

    fields = ("id", "name", "description", "workflow_type", "project_id", "status",
              "schedule", "schedule_type", "config", "webhook_ids", "is_active",
              "created_at", "updated_at", "last_run_at", "next_run_at",
              "run_count", "success_count", "fail_count")
    return WorkflowListResponse(
        workflows=[
            WorkflowResponse(**{f: getattr(wf, f) for f in fields})
            for wf in matched
        ],
        total=len(matched),
    )
|
||
|
||
|
||
@app.get("/api/v1/workflows/{workflow_id}", response_model=WorkflowResponse, tags=["Workflows"])
async def get_workflow_endpoint(workflow_id: str, _=Depends(verify_api_key)):
    """Fetch a single workflow by id."""
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    wf = get_workflow_manager_instance().get_workflow(workflow_id)
    if wf is None:
        raise HTTPException(status_code=404, detail="Workflow not found")

    fields = ("id", "name", "description", "workflow_type", "project_id", "status",
              "schedule", "schedule_type", "config", "webhook_ids", "is_active",
              "created_at", "updated_at", "last_run_at", "next_run_at",
              "run_count", "success_count", "fail_count")
    return WorkflowResponse(**{f: getattr(wf, f) for f in fields})
|
||
|
||
|
||
@app.patch("/api/v1/workflows/{workflow_id}", response_model=WorkflowResponse, tags=["Workflows"])
async def update_workflow_endpoint(workflow_id: str, request: WorkflowUpdate, _=Depends(verify_api_key)):
    """Apply a partial update to a workflow."""
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    manager = get_workflow_manager_instance()

    # NOTE(review): filtering on `is not None` means a field can never be
    # explicitly cleared to null via this endpoint — confirm that is intended.
    changes = {field: value for field, value in request.dict().items() if value is not None}
    refreshed = manager.update_workflow(workflow_id, **changes)

    if refreshed is None:
        raise HTTPException(status_code=404, detail="Workflow not found")

    fields = ("id", "name", "description", "workflow_type", "project_id", "status",
              "schedule", "schedule_type", "config", "webhook_ids", "is_active",
              "created_at", "updated_at", "last_run_at", "next_run_at",
              "run_count", "success_count", "fail_count")
    return WorkflowResponse(**{f: getattr(refreshed, f) for f in fields})
|
||
|
||
|
||
@app.delete("/api/v1/workflows/{workflow_id}", tags=["Workflows"])
async def delete_workflow_endpoint(workflow_id: str, _=Depends(verify_api_key)):
    """Delete a workflow by id."""
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    removed = get_workflow_manager_instance().delete_workflow(workflow_id)
    if not removed:
        raise HTTPException(status_code=404, detail="Workflow not found")

    return {"success": True, "message": "Workflow deleted successfully"}
|
||
|
||
|
||
@app.post("/api/v1/workflows/{workflow_id}/trigger", response_model=WorkflowTriggerResponse, tags=["Workflows"])
async def trigger_workflow_endpoint(
    workflow_id: str,
    request: Optional[WorkflowTriggerRequest] = None,  # PEP 484: was implicit Optional (`= None` without Optional[...])
    _=Depends(verify_api_key)
):
    """Manually trigger a workflow run.

    Args:
        workflow_id: ID of the workflow to execute.
        request: Optional body carrying ``input_data`` for the run.

    Raises:
        HTTPException 503 when workflow support is disabled,
        404 when the workflow id is unknown (ValueError from the manager),
        500 on any other execution failure.
    """
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    manager = get_workflow_manager_instance()

    try:
        result = await manager.execute_workflow(
            workflow_id,
            input_data=request.input_data if request else {}
        )

        return WorkflowTriggerResponse(
            success=result["success"],
            workflow_id=result["workflow_id"],
            log_id=result["log_id"],
            results=result["results"],
            duration_ms=result["duration_ms"]
        )
    except ValueError as e:
        # The manager signals "unknown workflow" with ValueError.
        raise HTTPException(status_code=404, detail=str(e)) from e
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e
|
||
|
||
|
||
@app.get("/api/v1/workflows/{workflow_id}/logs", response_model=WorkflowLogListResponse, tags=["Workflows"])
async def get_workflow_logs_endpoint(
    workflow_id: str,
    status: Optional[str] = None,
    limit: int = 100,
    offset: int = 0,
    _=Depends(verify_api_key)
):
    """Return execution logs for a workflow, optionally filtered by status."""
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    entries = get_workflow_manager_instance().list_logs(
        workflow_id=workflow_id, status=status, limit=limit, offset=offset
    )

    fields = ("id", "workflow_id", "task_id", "status", "start_time", "end_time",
              "duration_ms", "input_data", "output_data", "error_message", "created_at")
    return WorkflowLogListResponse(
        logs=[
            WorkflowLogResponse(**{f: getattr(entry, f) for f in fields})
            for entry in entries
        ],
        total=len(entries),
    )
|
||
|
||
|
||
@app.get("/api/v1/workflows/{workflow_id}/stats", response_model=WorkflowStatsResponse, tags=["Workflows"])
async def get_workflow_stats_endpoint(workflow_id: str, days: int = 30, _=Depends(verify_api_key)):
    """Return execution statistics for a workflow over the last ``days`` days."""
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    summary = get_workflow_manager_instance().get_workflow_stats(workflow_id, days)
    return WorkflowStatsResponse(**summary)
|
||
|
||
|
||
# ==================== Phase 7: Webhook Endpoints ====================
|
||
|
||
@app.post("/api/v1/webhooks", response_model=WebhookResponse, tags=["Webhooks"])
async def create_webhook_endpoint(request: WebhookCreate, _=Depends(verify_api_key)):
    """
    Create a webhook configuration.

    Webhook types:
    - **feishu**: Feishu (Lark) bot
    - **dingtalk**: DingTalk bot
    - **slack**: Slack Incoming Webhook
    - **custom**: custom webhook
    """
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    manager = get_workflow_manager_instance()

    try:
        candidate = WebhookConfig(
            id=str(uuid.uuid4())[:8],
            name=request.name,
            webhook_type=request.webhook_type,
            url=request.url,
            secret=request.secret,
            headers=request.headers,
            template=request.template,
        )
        created = manager.create_webhook(candidate)
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))

    # The response deliberately omits the secret.
    fields = ("id", "name", "webhook_type", "url", "headers", "template",
              "is_active", "created_at", "updated_at", "last_used_at",
              "success_count", "fail_count")
    return WebhookResponse(**{f: getattr(created, f) for f in fields})
|
||
|
||
|
||
@app.get("/api/v1/webhooks", response_model=WebhookListResponse, tags=["Webhooks"])
async def list_webhooks_endpoint(_=Depends(verify_api_key)):
    """List all webhook configurations (secrets are never returned)."""
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    configured = get_workflow_manager_instance().list_webhooks()

    fields = ("id", "name", "webhook_type", "url", "headers", "template",
              "is_active", "created_at", "updated_at", "last_used_at",
              "success_count", "fail_count")
    return WebhookListResponse(
        webhooks=[
            WebhookResponse(**{f: getattr(hook, f) for f in fields})
            for hook in configured
        ],
        total=len(configured),
    )
|
||
|
||
|
||
@app.get("/api/v1/webhooks/{webhook_id}", response_model=WebhookResponse, tags=["Webhooks"])
async def get_webhook_endpoint(webhook_id: str, _=Depends(verify_api_key)):
    """Fetch a single webhook configuration by id."""
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    hook = get_workflow_manager_instance().get_webhook(webhook_id)
    if hook is None:
        raise HTTPException(status_code=404, detail="Webhook not found")

    fields = ("id", "name", "webhook_type", "url", "headers", "template",
              "is_active", "created_at", "updated_at", "last_used_at",
              "success_count", "fail_count")
    return WebhookResponse(**{f: getattr(hook, f) for f in fields})
|
||
|
||
|
||
@app.patch("/api/v1/webhooks/{webhook_id}", response_model=WebhookResponse, tags=["Webhooks"])
async def update_webhook_endpoint(webhook_id: str, request: WebhookUpdate, _=Depends(verify_api_key)):
    """Apply a partial update to a webhook configuration."""
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    manager = get_workflow_manager_instance()

    # NOTE(review): `is not None` filtering means fields cannot be cleared to null.
    changes = {field: value for field, value in request.dict().items() if value is not None}
    refreshed = manager.update_webhook(webhook_id, **changes)

    if refreshed is None:
        raise HTTPException(status_code=404, detail="Webhook not found")

    fields = ("id", "name", "webhook_type", "url", "headers", "template",
              "is_active", "created_at", "updated_at", "last_used_at",
              "success_count", "fail_count")
    return WebhookResponse(**{f: getattr(refreshed, f) for f in fields})
|
||
|
||
|
||
@app.delete("/api/v1/webhooks/{webhook_id}", tags=["Webhooks"])
async def delete_webhook_endpoint(webhook_id: str, _=Depends(verify_api_key)):
    """Delete a webhook configuration by id."""
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    removed = get_workflow_manager_instance().delete_webhook(webhook_id)
    if not removed:
        raise HTTPException(status_code=404, detail="Webhook not found")

    return {"success": True, "message": "Webhook deleted successfully"}
|
||
|
||
|
||
@app.post("/api/v1/webhooks/{webhook_id}/test", tags=["Webhooks"])
async def test_webhook_endpoint(webhook_id: str, _=Depends(verify_api_key)):
    """Send a canned test message through a webhook and record the outcome."""
    if not WORKFLOW_AVAILABLE:
        raise HTTPException(status_code=503, detail="Workflow automation not available")

    manager = get_workflow_manager_instance()
    hook = manager.get_webhook(webhook_id)
    if hook is None:
        raise HTTPException(status_code=404, detail="Webhook not found")

    # Slack expects a {"text": ...} payload; every other type takes {"content": ...}.
    text = "🔔 这是来自 InsightFlow 的 Webhook 测试消息\n\n如果您收到这条消息,说明 Webhook 配置正确!"
    payload = {"text": text} if hook.webhook_type == "slack" else {"content": text}

    delivered = await manager.notifier.send(hook, payload)
    manager.update_webhook_stats(webhook_id, delivered)

    if not delivered:
        raise HTTPException(status_code=400, detail="Webhook test failed")
    return {"success": True, "message": "Webhook test sent successfully"}
|
||
|
||
|
||
# ==================== Phase 7: Multimodal Support Endpoints ====================
|
||
|
||
# Pydantic Models for Multimodal API
|
||
class VideoUploadResponse(BaseModel):
    """Response body for the video-upload endpoint."""
    video_id: str          # short uuid assigned to the processed video
    project_id: str        # owning project
    filename: str          # original uploaded filename
    status: str            # processing status, e.g. "completed"
    audio_extracted: bool  # whether an audio track was pulled from the video
    frame_count: int       # number of key frames extracted
    ocr_text_preview: str  # truncated OCR text aggregated from the frames
    message: str           # human-readable outcome summary
|
||
|
||
|
||
class ImageUploadResponse(BaseModel):
    """Response body for the image-upload endpoint."""
    image_id: str          # short uuid assigned to the processed image
    project_id: str        # owning project
    filename: str          # original uploaded filename
    image_type: str        # detected kind (e.g. whiteboard / slide / handwriting)
    ocr_text_preview: str  # truncated OCR text extracted from the image
    description: str       # generated description of the image content
    entity_count: int      # number of entities extracted from the image
    status: str            # processing status, e.g. "completed"
|
||
|
||
|
||
class MultimodalEntityLinkResponse(BaseModel):
    """A cross-modal link between two entities (e.g. text entity <-> image entity)."""
    link_id: str           # identifier of the link record
    source_entity_id: str
    target_entity_id: str
    source_modality: str   # modality of the source entity (e.g. text / image / video)
    target_modality: str   # modality of the target entity
    link_type: str         # kind of association between the two entities
    confidence: float      # alignment confidence score
    evidence: str          # supporting snippet explaining the link
|
||
|
||
|
||
class MultimodalAlignmentRequest(BaseModel):
    """Request body for cross-modal entity alignment."""
    project_id: str
    threshold: float = 0.85  # minimum similarity for two entities to be aligned
|
||
|
||
|
||
class MultimodalAlignmentResponse(BaseModel):
    """Result of a cross-modal entity alignment run."""
    project_id: str
    aligned_count: int  # number of cross-modal links produced
    links: List[MultimodalEntityLinkResponse]
    message: str        # human-readable outcome summary
|
||
|
||
|
||
class MultimodalStatsResponse(BaseModel):
    """Multimodal content statistics for a project."""
    project_id: str
    video_count: int
    image_count: int
    multimodal_entity_count: int           # entities mentioned in non-text modalities
    cross_modal_links: int                 # number of cross-modal entity links
    modality_distribution: Dict[str, int]  # modality name -> entity/mention count
|
||
|
||
|
||
@app.post("/api/v1/projects/{project_id}/upload-video", response_model=VideoUploadResponse, tags=["Multimodal"])
async def upload_video_endpoint(
    project_id: str,
    file: UploadFile = File(...),
    extract_interval: int = Form(5),
    _=Depends(verify_api_key)
):
    """
    Upload a video file for processing.

    - Extracts the audio track
    - Extracts key frames (one every N seconds)
    - Runs OCR over the key frames
    - Combines the video, audio and OCR results

    **Parameters:**
    - **extract_interval**: key-frame extraction interval in seconds, default 5
    """
    if not MULTIMODAL_AVAILABLE:
        raise HTTPException(status_code=503, detail="Multimodal processing not available")

    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # Read the whole upload into memory before handing it to the processor.
    video_data = await file.read()

    # Create the video processor with the requested frame interval.
    processor = get_multimodal_processor(frame_interval=extract_interval)

    # Process the video; a short uuid doubles as the DB primary key.
    video_id = str(uuid.uuid4())[:8]
    result = processor.process_video(video_data, file.filename, project_id, video_id)

    if not result.success:
        raise HTTPException(status_code=500, detail=f"Video processing failed: {result.error_message}")

    # Persist the video record.
    # NOTE(review): the connection is not closed if an exception fires between
    # get_conn() and close() — consider a try/finally or context manager.
    conn = db.get_conn()
    now = datetime.now().isoformat()

    # Probe duration/fps/resolution from the file the processor wrote to disk.
    # NOTE(review): assumes the processor saved it as "{video_id}_{filename}"
    # under processor.video_dir — confirm against the processor implementation.
    video_info = processor.extract_video_info(os.path.join(processor.video_dir, f"{video_id}_{file.filename}"))

    conn.execute(
        """INSERT INTO videos
        (id, project_id, filename, duration, fps, resolution,
         audio_transcript_id, full_ocr_text, extracted_entities,
         extracted_relations, status, created_at, updated_at)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
        (video_id, project_id, file.filename, video_info.get('duration', 0),
         video_info.get('fps', 0),
         json.dumps({'width': video_info.get('width', 0), 'height': video_info.get('height', 0)}),
         None, result.full_text, '[]', '[]', 'completed', now, now)
    )

    # Persist one row per extracted key frame.
    for frame in result.frames:
        conn.execute(
            """INSERT INTO video_frames
            (id, video_id, frame_number, timestamp, image_url, ocr_text, extracted_entities, created_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
            (frame.id, frame.video_id, frame.frame_number, frame.timestamp,
             frame.frame_path, frame.ocr_text, json.dumps(frame.entities_detected), now)
        )

    conn.commit()
    conn.close()

    # Extract entities/relations from the aggregated OCR text, reusing the
    # same LLM extraction pipeline as text documents.
    if result.full_text:
        raw_entities, raw_relations = extract_entities_with_llm(result.full_text)

        # Align each raw entity against existing project entities; create new
        # ones where no match is found. Maps entity name -> entity id.
        entity_name_to_id = {}
        for raw_ent in raw_entities:
            existing = align_entity(project_id, raw_ent["name"], db, raw_ent.get("definition", ""))

            if existing:
                entity_name_to_id[raw_ent["name"]] = existing.id
            else:
                new_ent = db.create_entity(Entity(
                    id=str(uuid.uuid4())[:8],
                    project_id=project_id,
                    name=raw_ent["name"],
                    type=raw_ent.get("type", "OTHER"),
                    definition=raw_ent.get("definition", "")
                ))
                entity_name_to_id[raw_ent["name"]] = new_ent.id

            # Record the multimodal mention tying this entity to the video.
            # NOTE(review): opens/commits/closes a fresh connection per entity —
            # works, but batching outside the loop would be cheaper.
            conn = db.get_conn()
            conn.execute(
                """INSERT OR REPLACE INTO multimodal_mentions
                (id, project_id, entity_id, modality, source_id, source_type, text_snippet, confidence, created_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
                (str(uuid.uuid4())[:8], project_id, entity_name_to_id[raw_ent["name"]],
                 'video', video_id, 'video_frame', raw_ent.get("name", ""), 1.0, now)
            )
            conn.commit()
            conn.close()

        # Persist relations whose endpoints both resolved to known entities;
        # relations referencing unknown names are silently dropped.
        for rel in raw_relations:
            source_id = entity_name_to_id.get(rel.get("source", ""))
            target_id = entity_name_to_id.get(rel.get("target", ""))
            if source_id and target_id:
                db.create_relation(
                    project_id=project_id,
                    source_entity_id=source_id,
                    target_entity_id=target_id,
                    relation_type=rel.get("type", "related"),
                    evidence=result.full_text[:200]
                )

        # Back-fill the raw extraction results onto the video row.
        conn = db.get_conn()
        conn.execute(
            "UPDATE videos SET extracted_entities = ?, extracted_relations = ? WHERE id = ?",
            (json.dumps(raw_entities), json.dumps(raw_relations), video_id)
        )
        conn.commit()
        conn.close()

    return VideoUploadResponse(
        video_id=video_id,
        project_id=project_id,
        filename=file.filename,
        status="completed",
        audio_extracted=bool(result.audio_path),
        frame_count=len(result.frames),
        ocr_text_preview=result.full_text[:200] + "..." if len(result.full_text) > 200 else result.full_text,
        message="Video processed successfully"
    )
|
||
|
||
|
||
@app.post("/api/v1/projects/{project_id}/upload-image", response_model=ImageUploadResponse, tags=["Multimodal"])
async def upload_image_endpoint(
    project_id: str,
    file: UploadFile = File(...),
    detect_type: bool = Form(True),
    _=Depends(verify_api_key)
):
    """
    Upload an image file for processing.

    - Recognizes image content (whiteboard, slides, handwritten notes)
    - Runs OCR to extract text from the image
    - Extracts entities and relations from the recognized content

    **Parameters:**
    - **detect_type**: auto-detect the image type, defaults to True
    """
    if not IMAGE_PROCESSOR_AVAILABLE:
        raise HTTPException(status_code=503, detail="Image processing not available")

    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # Read the uploaded image bytes
    image_data = await file.read()

    # Process the image (type detection, OCR, entity/relation extraction)
    processor = get_image_processor()
    image_id = str(uuid.uuid4())[:8]
    result = processor.process_image(image_data, file.filename, image_id, detect_type)

    if not result.success:
        raise HTTPException(status_code=500, detail=f"Image processing failed: {result.error_message}")

    # Persist the image record
    conn = db.get_conn()
    now = datetime.now().isoformat()

    conn.execute(
        """INSERT INTO images
        (id, project_id, filename, ocr_text, description,
         extracted_entities, extracted_relations, status, created_at, updated_at)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
        (image_id, project_id, file.filename, result.ocr_text, result.description,
         json.dumps([{"name": e.name, "type": e.type, "confidence": e.confidence} for e in result.entities]),
         json.dumps([{"source": r.source, "target": r.target, "type": r.relation_type} for r in result.relations]),
         'completed', now, now)
    )
    conn.commit()
    conn.close()

    # Persist extracted entities (aligning against existing ones first).
    # Use ONE connection with a single commit for all mention inserts
    # instead of opening/committing/closing per entity.
    conn = db.get_conn()
    try:
        for entity in result.entities:
            existing = align_entity(project_id, entity.name, db, "")

            if not existing:
                new_ent = db.create_entity(Entity(
                    id=str(uuid.uuid4())[:8],
                    project_id=project_id,
                    name=entity.name,
                    type=entity.type,
                    definition=""
                ))
                entity_id = new_ent.id
            else:
                entity_id = existing.id

            # Record the multimodal mention of this entity
            conn.execute(
                """INSERT OR REPLACE INTO multimodal_mentions
                (id, project_id, entity_id, modality, source_id, source_type, text_snippet, confidence, created_at)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
                (str(uuid.uuid4())[:8], project_id, entity_id,
                 'image', image_id, result.image_type, entity.name, entity.confidence, now)
            )
        conn.commit()
    finally:
        conn.close()

    # Persist extracted relations (only when both endpoints resolve to entities)
    for relation in result.relations:
        source_entity = db.get_entity_by_name(project_id, relation.source)
        target_entity = db.get_entity_by_name(project_id, relation.target)

        if source_entity and target_entity:
            db.create_relation(
                project_id=project_id,
                source_entity_id=source_entity.id,
                target_entity_id=target_entity.id,
                relation_type=relation.relation_type,
                evidence=result.ocr_text[:200]
            )

    return ImageUploadResponse(
        image_id=image_id,
        project_id=project_id,
        filename=file.filename,
        image_type=result.image_type,
        ocr_text_preview=result.ocr_text[:200] + "..." if len(result.ocr_text) > 200 else result.ocr_text,
        description=result.description,
        entity_count=len(result.entities),
        status="completed"
    )
|
||
|
||
|
||
@app.post("/api/v1/projects/{project_id}/upload-images-batch", tags=["Multimodal"])
async def upload_images_batch_endpoint(
    project_id: str,
    files: List[UploadFile] = File(...),
    _=Depends(verify_api_key)
):
    """
    Batch-upload image files for processing.

    Accepts several images at once; each image goes through OCR and
    entity extraction.
    """
    if not IMAGE_PROCESSOR_AVAILABLE:
        raise HTTPException(status_code=503, detail="Image processing not available")

    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # Read all uploaded images into memory
    images_data = []
    for file in files:
        image_data = await file.read()
        images_data.append((image_data, file.filename))

    # Process the whole batch
    processor = get_image_processor()
    batch_result = processor.process_batch(images_data, project_id)

    # Persist results with ONE connection and ONE commit instead of an
    # open/commit/close round-trip per image.
    results = []
    conn = db.get_conn()
    now = datetime.now().isoformat()
    try:
        for result in batch_result.results:
            if result.success:
                image_id = result.image_id

                conn.execute(
                    """INSERT INTO images
                    (id, project_id, filename, ocr_text, description,
                     extracted_entities, extracted_relations, status, created_at, updated_at)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
                    (image_id, project_id, "batch_image", result.ocr_text, result.description,
                     json.dumps([{"name": e.name, "type": e.type} for e in result.entities]),
                     json.dumps([{"source": r.source, "target": r.target} for r in result.relations]),
                     'completed', now, now)
                )

                results.append({
                    "image_id": image_id,
                    "status": "success",
                    "image_type": result.image_type,
                    "entity_count": len(result.entities)
                })
            else:
                results.append({
                    "image_id": result.image_id,
                    "status": "failed",
                    "error": result.error_message
                })
        conn.commit()
    finally:
        conn.close()

    return {
        "project_id": project_id,
        "total_count": batch_result.total_count,
        "success_count": batch_result.success_count,
        "failed_count": batch_result.failed_count,
        "results": results
    }
|
||
|
||
|
||
@app.post("/api/v1/projects/{project_id}/multimodal/align", response_model=MultimodalAlignmentResponse, tags=["Multimodal"])
async def align_multimodal_entities_endpoint(
    project_id: str,
    threshold: float = 0.85,
    _=Depends(verify_api_key)
):
    """
    Cross-modal entity alignment.

    Aligns mentions of the same entity across modalities (audio, video,
    image, document).

    **Parameters:**
    - **threshold**: similarity threshold, defaults to 0.85
    """
    if not MULTIMODAL_LINKER_AVAILABLE:
        raise HTTPException(status_code=503, detail="Multimodal entity linker not available")

    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # Fetch the project's multimodal mentions
    conn = db.get_conn()
    mentions = conn.execute(
        """SELECT * FROM multimodal_mentions WHERE project_id = ?""",
        (project_id,)
    ).fetchall()
    conn.close()

    # Group entities by modality. Track seen ids per modality in a set
    # (O(1) membership instead of rescanning the list) and skip rows with
    # an unrecognized modality instead of raising KeyError.
    modality_entities = {"audio": [], "video": [], "image": [], "document": []}
    seen_ids = {modality: set() for modality in modality_entities}

    for mention in mentions:
        modality = mention['modality']
        if modality not in modality_entities:
            continue
        entity = db.get_entity(mention['entity_id'])
        if entity and entity.id not in seen_ids[modality]:
            seen_ids[modality].add(entity.id)
            modality_entities[modality].append({
                'id': entity.id,
                'name': entity.name,
                'type': entity.type,
                'definition': entity.definition,
                'aliases': entity.aliases
            })

    # Run cross-modal alignment
    linker = get_multimodal_entity_linker(similarity_threshold=threshold)
    links = linker.align_cross_modal_entities(
        project_id=project_id,
        audio_entities=modality_entities['audio'],
        video_entities=modality_entities['video'],
        image_entities=modality_entities['image'],
        document_entities=modality_entities['document']
    )

    # Persist the links
    conn = db.get_conn()
    now = datetime.now().isoformat()

    saved_links = []
    for link in links:
        conn.execute(
            """INSERT OR REPLACE INTO multimodal_entity_links
            (id, entity_id, linked_entity_id, link_type, confidence, evidence, modalities, created_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
            (link.id, link.source_entity_id, link.target_entity_id, link.link_type,
             link.confidence, link.evidence,
             json.dumps([link.source_modality, link.target_modality]), now)
        )
        saved_links.append(MultimodalEntityLinkResponse(
            link_id=link.id,
            source_entity_id=link.source_entity_id,
            target_entity_id=link.target_entity_id,
            source_modality=link.source_modality,
            target_modality=link.target_modality,
            link_type=link.link_type,
            confidence=link.confidence,
            evidence=link.evidence
        ))

    conn.commit()
    conn.close()

    return MultimodalAlignmentResponse(
        project_id=project_id,
        aligned_count=len(saved_links),
        links=saved_links,
        message=f"Successfully aligned {len(saved_links)} cross-modal entity pairs"
    )
|
||
|
||
|
||
@app.get("/api/v1/projects/{project_id}/multimodal/stats", response_model=MultimodalStatsResponse, tags=["Multimodal"])
async def get_multimodal_stats_endpoint(project_id: str, _=Depends(verify_api_key)):
    """
    Return multimodal statistics for a project.

    Includes video/image counts plus cross-modal entity-link statistics.
    """
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    conn = db.get_conn()

    def count_one(sql, params):
        # Run a COUNT(...) AS count query and return the single number.
        return conn.execute(sql, params).fetchone()['count']

    # Overall counts per source table
    video_count = count_one(
        "SELECT COUNT(*) as count FROM videos WHERE project_id = ?", (project_id,))
    image_count = count_one(
        "SELECT COUNT(*) as count FROM images WHERE project_id = ?", (project_id,))
    multimodal_count = count_one(
        "SELECT COUNT(DISTINCT entity_id) as count FROM multimodal_mentions WHERE project_id = ?",
        (project_id,))
    cross_modal_count = count_one(
        "SELECT COUNT(*) as count FROM multimodal_entity_links WHERE entity_id IN (SELECT id FROM entities WHERE project_id = ?)",
        (project_id,))

    # Mention distribution across modalities
    modality_dist = {
        modality: count_one(
            "SELECT COUNT(*) as count FROM multimodal_mentions WHERE project_id = ? AND modality = ?",
            (project_id, modality))
        for modality in ('audio', 'video', 'image', 'document')
    }

    conn.close()

    return MultimodalStatsResponse(
        project_id=project_id,
        video_count=video_count,
        image_count=image_count,
        multimodal_entity_count=multimodal_count,
        cross_modal_links=cross_modal_count,
        modality_distribution=modality_dist
    )
|
||
|
||
|
||
@app.get("/api/v1/projects/{project_id}/videos", tags=["Multimodal"])
async def list_project_videos_endpoint(project_id: str, _=Depends(verify_api_key)):
    """List a project's videos, newest first."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    conn = db.get_conn()

    rows = conn.execute(
        """SELECT id, filename, duration, fps, resolution,
        full_ocr_text, status, created_at
        FROM videos WHERE project_id = ? ORDER BY created_at DESC""",
        (project_id,)
    ).fetchall()

    conn.close()

    response = []
    for row in rows:
        ocr = row['full_ocr_text']
        response.append({
            "id": row['id'],
            "filename": row['filename'],
            "duration": row['duration'],
            "fps": row['fps'],
            "resolution": json.loads(row['resolution']) if row['resolution'] else None,
            # Truncate long OCR text to a 200-char preview
            "ocr_preview": ocr[:200] + "..." if ocr and len(ocr) > 200 else ocr,
            "status": row['status'],
            "created_at": row['created_at'],
        })
    return response
|
||
|
||
|
||
@app.get("/api/v1/projects/{project_id}/images", tags=["Multimodal"])
async def list_project_images_endpoint(project_id: str, _=Depends(verify_api_key)):
    """List a project's images, newest first."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    conn = db.get_conn()

    rows = conn.execute(
        """SELECT id, filename, ocr_text, description,
        extracted_entities, status, created_at
        FROM images WHERE project_id = ? ORDER BY created_at DESC""",
        (project_id,)
    ).fetchall()

    conn.close()

    response = []
    for row in rows:
        ocr = row['ocr_text']
        entities_json = row['extracted_entities']
        response.append({
            "id": row['id'],
            "filename": row['filename'],
            # Truncate long OCR text to a 200-char preview
            "ocr_preview": ocr[:200] + "..." if ocr and len(ocr) > 200 else ocr,
            "description": row['description'],
            "entity_count": len(json.loads(entities_json)) if entities_json else 0,
            "status": row['status'],
            "created_at": row['created_at'],
        })
    return response
|
||
|
||
|
||
@app.get("/api/v1/videos/{video_id}/frames", tags=["Multimodal"])
async def get_video_frames_endpoint(video_id: str, _=Depends(verify_api_key)):
    """List a video's key frames in timestamp order."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    conn = db.get_conn()

    rows = conn.execute(
        """SELECT id, frame_number, timestamp, image_url, ocr_text, extracted_entities
        FROM video_frames WHERE video_id = ? ORDER BY timestamp""",
        (video_id,)
    ).fetchall()

    conn.close()

    response = []
    for row in rows:
        response.append({
            "id": row['id'],
            "frame_number": row['frame_number'],
            "timestamp": row['timestamp'],
            "image_url": row['image_url'],
            "ocr_text": row['ocr_text'],
            # extracted_entities is stored as a JSON string; NULL -> []
            "entities": json.loads(row['extracted_entities']) if row['extracted_entities'] else [],
        })
    return response
|
||
|
||
|
||
@app.get("/api/v1/entities/{entity_id}/multimodal-mentions", tags=["Multimodal"])
async def get_entity_multimodal_mentions_endpoint(entity_id: str, _=Depends(verify_api_key)):
    """List an entity's mentions across modalities, newest first."""
    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    conn = db.get_conn()

    rows = conn.execute(
        """SELECT m.*, e.name as entity_name
        FROM multimodal_mentions m
        JOIN entities e ON m.entity_id = e.id
        WHERE m.entity_id = ? ORDER BY m.created_at DESC""",
        (entity_id,)
    ).fetchall()

    conn.close()

    response = []
    for row in rows:
        response.append({
            "id": row['id'],
            "entity_id": row['entity_id'],
            "entity_name": row['entity_name'],
            "modality": row['modality'],
            "source_id": row['source_id'],
            "source_type": row['source_type'],
            "text_snippet": row['text_snippet'],
            "confidence": row['confidence'],
            "created_at": row['created_at'],
        })
    return response
|
||
|
||
|
||
@app.get("/api/v1/projects/{project_id}/multimodal/suggest-merges", tags=["Multimodal"])
async def suggest_multimodal_merges_endpoint(project_id: str, _=Depends(verify_api_key)):
    """
    Suggest multimodal entity merges.

    Analyzes entities across modalities and proposes pairs that look like
    they could be merged.
    """
    if not MULTIMODAL_LINKER_AVAILABLE:
        raise HTTPException(status_code=503, detail="Multimodal entity linker not available")

    if not DB_AVAILABLE:
        raise HTTPException(status_code=500, detail="Database not available")

    db = get_db_manager()
    project = db.get_project(project_id)
    if not project:
        raise HTTPException(status_code=404, detail="Project not found")

    # Flatten the project's entities into plain dicts for the linker
    entities = db.list_project_entities(project_id)
    entity_dicts = [
        {
            'id': e.id,
            'name': e.name,
            'type': e.type,
            'definition': e.definition,
            'aliases': e.aliases,
        }
        for e in entities
    ]

    # Load previously recorded cross-modal links
    conn = db.get_conn()
    existing_links = conn.execute(
        """SELECT * FROM multimodal_entity_links
        WHERE entity_id IN (SELECT id FROM entities WHERE project_id = ?)""",
        (project_id,)
    ).fetchall()
    conn.close()

    # The table does not record per-side modality, hence 'unknown' here
    existing_link_objects = [
        EntityLink(
            id=row['id'],
            project_id=project_id,
            source_entity_id=row['entity_id'],
            target_entity_id=row['linked_entity_id'],
            link_type=row['link_type'],
            source_modality='unknown',
            target_modality='unknown',
            confidence=row['confidence'],
            evidence=row['evidence'] or ""
        )
        for row in existing_links
    ]

    # Ask the linker for merge candidates
    linker = get_multimodal_entity_linker()
    suggestions = linker.suggest_entity_merges(entity_dicts, existing_link_objects)

    def brief(ent):
        # Compact entity summary for the response payload.
        return {"id": ent.get('id'), "name": ent.get('name'), "type": ent.get('type')}

    return {
        "project_id": project_id,
        "suggestion_count": len(suggestions),
        "suggestions": [
            {
                "entity1": brief(s['entity1']),
                "entity2": brief(s['entity2']),
                "similarity": s['similarity'],
                "match_type": s['match_type'],
                "suggested_action": s['suggested_action'],
            }
            for s in suggestions[:20]  # return at most 20 suggestions
        ],
    }
|
||
|
||
|
||
# ==================== Phase 7: Multimodal Support API ====================
|
||
|
||
class VideoUploadResponse(BaseModel):
    """Response model for video upload/processing.

    Only identifiers, status and message are required; every other field
    is defaulted so that both construction sites in this module validate —
    the processing endpoint fills project_id/frame_count/audio_extracted/
    ocr_text_preview, while metadata-oriented callers may fill duration/
    fps/resolution/frames_extracted/ocr_text_length instead. Previously
    those metadata fields were required, so the endpoint's constructor
    call raised a pydantic ValidationError at runtime.
    """
    video_id: str
    filename: str
    status: str
    message: str
    project_id: Optional[str] = None
    duration: float = 0.0
    fps: float = 0.0
    resolution: Optional[Dict[str, int]] = None
    frames_extracted: int = 0
    frame_count: int = 0
    audio_extracted: bool = False
    ocr_text_length: int = 0
    ocr_text_preview: Optional[str] = None
|
||
|
||
|
||
class ImageUploadResponse(BaseModel):
    """Response model for image upload/processing.

    Fields are defaulted so the upload endpoint's construction
    (image_id/project_id/filename/image_type/ocr_text_preview/description/
    entity_count/status) validates; previously `message` and
    `ocr_text_length` were required but never supplied by the endpoint,
    causing a pydantic ValidationError at runtime.
    """
    image_id: str
    filename: str
    status: str
    description: str = ""
    message: str = ""
    project_id: Optional[str] = None
    image_type: Optional[str] = None
    ocr_text_length: int = 0
    ocr_text_preview: Optional[str] = None
    entity_count: int = 0
|
||
|
||
|
||
class MultimodalEntityLinkResponse(BaseModel):
    """One cross-modal entity link.

    Supports both naming schemes used in this module: the align endpoint
    fills source_entity_id/target_entity_id plus source_modality/
    target_modality, while legacy callers may fill entity_id/
    linked_entity_id plus modalities. Previously entity_id,
    linked_entity_id and modalities were required, so the align
    endpoint's constructor call raised a pydantic ValidationError.
    """
    link_id: str
    link_type: str
    confidence: float
    evidence: str
    entity_id: str = ""
    linked_entity_id: str = ""
    source_entity_id: str = ""
    target_entity_id: str = ""
    source_modality: str = ""
    target_modality: str = ""
    modalities: List[str] = Field(default_factory=list)
|
||
|
||
|
||
class MultimodalProfileResponse(BaseModel):
    """Multimodal profile of an entity.

    NOTE(review): only identity fields are declared here — looks truncated;
    confirm whether per-modality profile fields were intended.
    """
    entity_id: str
    entity_name: str
|
||
|
||
|
||
# ==================== Phase 7 Task 7: Plugin Management Pydantic Models ====================
|
||
|
||
class PluginCreate(BaseModel):
    """Request body for registering a new plugin."""
    name: str = Field(..., description="插件名称")
    plugin_type: str = Field(..., description="插件类型: chrome_extension, feishu_bot, dingtalk_bot, zapier, make, webdav, custom")
    project_id: str = Field(..., description="关联项目ID")
    config: Dict = Field(default_factory=dict, description="插件配置")
|
||
|
||
|
||
class PluginUpdate(BaseModel):
    """Partial update for a plugin; fields left as None are unchanged."""
    name: Optional[str] = None
    status: Optional[str] = None  # active, inactive, error, pending
    config: Optional[Dict] = None
|
||
|
||
|
||
class PluginResponse(BaseModel):
    """Serialized view of one plugin registration, including usage stats."""
    id: str
    name: str
    plugin_type: str
    project_id: str
    status: str
    config: Dict
    created_at: str
    updated_at: str
    last_used_at: Optional[str]
    use_count: int
|
||
|
||
|
||
class PluginListResponse(BaseModel):
    """Paginated-style wrapper for a list of plugins."""
    plugins: List[PluginResponse]
    total: int
|
||
|
||
|
||
class ChromeExtensionTokenCreate(BaseModel):
    """Request body for minting a Chrome-extension access token."""
    name: str = Field(..., description="令牌名称")
    project_id: Optional[str] = Field(default=None, description="关联项目ID")
    permissions: List[str] = Field(default=["read"], description="权限列表: read, write, delete")
    expires_days: Optional[int] = Field(default=None, description="过期天数")
|
||
|
||
|
||
class ChromeExtensionTokenResponse(BaseModel):
    """Newly created Chrome-extension token; the raw token is shown only once."""
    id: str
    token: str = Field(..., description="令牌(仅显示一次)")
    name: str
    project_id: Optional[str]
    permissions: List[str]
    expires_at: Optional[str]
    created_at: str
|
||
|
||
|
||
class ChromeExtensionImportRequest(BaseModel):
    """Webpage-import payload sent by the Chrome extension (token-authenticated)."""
    token: str = Field(..., description="Chrome扩展令牌")
    url: str = Field(..., description="网页URL")
    title: str = Field(..., description="网页标题")
    content: str = Field(..., description="网页正文内容")
    html_content: Optional[str] = Field(default=None, description="HTML内容(可选)")
|
||
|
||
|
||
class BotSessionCreate(BaseModel):
    """Request body for binding a chat-bot session (group/conversation) to a project."""
    session_id: str = Field(..., description="群ID或会话ID")
    session_name: str = Field(..., description="会话名称")
    project_id: Optional[str] = Field(default=None, description="关联项目ID")
    webhook_url: str = Field(default="", description="Webhook URL")
    secret: str = Field(default="", description="签名密钥")
|
||
|
||
|
||
class BotSessionResponse(BaseModel):
    """Serialized view of a bot session, including activity stats."""
    id: str
    bot_type: str
    session_id: str
    session_name: str
    project_id: Optional[str]
    webhook_url: str
    is_active: bool
    created_at: str
    last_message_at: Optional[str]
    message_count: int
|
||
|
||
|
||
class BotMessageRequest(BaseModel):
    """Inbound bot message to be processed for a session."""
    session_id: str = Field(..., description="会话ID")
    msg_type: str = Field(default="text", description="消息类型: text, audio, file")
    content: Dict = Field(default_factory=dict, description="消息内容")
|
||
|
||
|
||
class BotMessageResponse(BaseModel):
    """Outcome of processing a bot message; `error` is set on failure."""
    success: bool
    response: str
    error: Optional[str] = None
|
||
|
||
|
||
class WebhookEndpointCreate(BaseModel):
    """Request body for registering an outbound webhook endpoint."""
    name: str = Field(..., description="端点名称")
    endpoint_type: str = Field(..., description="端点类型: zapier, make, custom")
    endpoint_url: str = Field(..., description="Webhook URL")
    project_id: Optional[str] = Field(default=None, description="关联项目ID")
    auth_type: str = Field(default="none", description="认证类型: none, api_key, oauth, custom")
    auth_config: Dict = Field(default_factory=dict, description="认证配置")
    trigger_events: List[str] = Field(default_factory=list, description="触发事件列表")
|
||
|
||
|
||
class WebhookEndpointResponse(BaseModel):
    """Serialized view of a webhook endpoint, including trigger stats.

    Note: auth_config is intentionally omitted so credentials are not echoed back.
    """
    id: str
    name: str
    endpoint_type: str
    endpoint_url: str
    project_id: Optional[str]
    auth_type: str
    trigger_events: List[str]
    is_active: bool
    created_at: str
    last_triggered_at: Optional[str]
    trigger_count: int
|
||
|
||
|
||
class WebhookTestResponse(BaseModel):
    """Result of a test delivery against a webhook endpoint."""
    success: bool
    endpoint_id: str
    message: str
|
||
|
||
|
||
class WebDAVSyncCreate(BaseModel):
    """Request body for creating a WebDAV sync configuration."""
    name: str = Field(..., description="同步配置名称")
    project_id: str = Field(..., description="关联项目ID")
    server_url: str = Field(..., description="WebDAV服务器URL")
    username: str = Field(..., description="用户名")
    password: str = Field(..., description="密码")
    remote_path: str = Field(default="/insightflow", description="远程路径")
    sync_mode: str = Field(default="bidirectional", description="同步模式: bidirectional, upload_only, download_only")
    sync_interval: int = Field(default=3600, description="同步间隔(秒)")
|
||
|
||
|
||
class WebDAVSyncResponse(BaseModel):
    """Serialized view of a WebDAV sync configuration.

    Note: the password is intentionally omitted from responses.
    """
    id: str
    name: str
    project_id: str
    server_url: str
    username: str
    remote_path: str
    sync_mode: str
    sync_interval: int
    last_sync_at: Optional[str]
    last_sync_status: str
    is_active: bool
    created_at: str
    sync_count: int
|
||
|
||
|
||
class WebDAVTestResponse(BaseModel):
    """Result of a WebDAV connectivity test."""
    success: bool
    message: str
|
||
|
||
|
||
class WebDAVSyncResult(BaseModel):
    """Outcome of one WebDAV sync run; counts are set on success, `error` on failure."""
    success: bool
    message: str
    entities_count: Optional[int] = None
    relations_count: Optional[int] = None
    remote_path: Optional[str] = None
    error: Optional[str] = None
|
||
|
||
|
||
# Plugin Manager singleton
_plugin_manager_instance = None


def get_plugin_manager_instance():
    """Return the lazily-created PluginManager singleton.

    Returns None when either the plugin-manager module or the database
    is unavailable; callers must handle that case.
    """
    global _plugin_manager_instance
    if _plugin_manager_instance is None and PLUGIN_MANAGER_AVAILABLE and DB_AVAILABLE:
        _plugin_manager_instance = get_plugin_manager(get_db_manager())
    return _plugin_manager_instance
|
||
|
||
|
||
# ==================== Phase 7 Task 7: Plugin Management Endpoints ====================
|
||
|
||
@app.post("/api/v1/plugins", response_model=PluginResponse, tags=["Plugins"])
async def create_plugin_endpoint(request: PluginCreate, _=Depends(verify_api_key)):
    """
    Create a plugin.

    Plugin types:
    - **chrome_extension**: Chrome extension
    - **feishu_bot**: Feishu bot
    - **dingtalk_bot**: DingTalk bot
    - **zapier**: Zapier integration
    - **make**: Make (Integromat) integration
    - **webdav**: WebDAV sync
    - **custom**: custom plugin
    """
    if not PLUGIN_MANAGER_AVAILABLE:
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    manager = get_plugin_manager_instance()
    if manager is None:
        # The singleton getter returns None when the DB is unavailable even
        # though PLUGIN_MANAGER_AVAILABLE is True; guard against a 500.
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    plugin = Plugin(
        id=str(uuid.uuid4())[:8],
        name=request.name,
        plugin_type=request.plugin_type,
        project_id=request.project_id,
        config=request.config
    )

    created = manager.create_plugin(plugin)

    return PluginResponse(
        id=created.id,
        name=created.name,
        plugin_type=created.plugin_type,
        project_id=created.project_id,
        status=created.status,
        config=created.config,
        created_at=created.created_at,
        updated_at=created.updated_at,
        last_used_at=created.last_used_at,
        use_count=created.use_count
    )
|
||
|
||
|
||
@app.get("/api/v1/plugins", response_model=PluginListResponse, tags=["Plugins"])
async def list_plugins_endpoint(
    project_id: Optional[str] = None,
    plugin_type: Optional[str] = None,
    status: Optional[str] = None,
    _=Depends(verify_api_key)
):
    """List plugins, optionally filtered by project, type, or status."""
    if not PLUGIN_MANAGER_AVAILABLE:
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    manager = get_plugin_manager_instance()
    if manager is None:
        # Manager construction also requires the database; avoid AttributeError.
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    plugins = manager.list_plugins(project_id, plugin_type, status)

    return PluginListResponse(
        plugins=[
            PluginResponse(
                id=p.id,
                name=p.name,
                plugin_type=p.plugin_type,
                project_id=p.project_id,
                status=p.status,
                config=p.config,
                created_at=p.created_at,
                updated_at=p.updated_at,
                last_used_at=p.last_used_at,
                use_count=p.use_count
            )
            for p in plugins
        ],
        total=len(plugins)
    )
|
||
|
||
|
||
@app.get("/api/v1/plugins/{plugin_id}", response_model=PluginResponse, tags=["Plugins"])
async def get_plugin_endpoint(plugin_id: str, _=Depends(verify_api_key)):
    """Fetch one plugin by id."""
    if not PLUGIN_MANAGER_AVAILABLE:
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    manager = get_plugin_manager_instance()
    if manager is None:
        # Manager construction also requires the database; avoid AttributeError.
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    plugin = manager.get_plugin(plugin_id)

    if not plugin:
        raise HTTPException(status_code=404, detail="Plugin not found")

    return PluginResponse(
        id=plugin.id,
        name=plugin.name,
        plugin_type=plugin.plugin_type,
        project_id=plugin.project_id,
        status=plugin.status,
        config=plugin.config,
        created_at=plugin.created_at,
        updated_at=plugin.updated_at,
        last_used_at=plugin.last_used_at,
        use_count=plugin.use_count
    )
|
||
|
||
|
||
@app.patch("/api/v1/plugins/{plugin_id}", response_model=PluginResponse, tags=["Plugins"])
async def update_plugin_endpoint(plugin_id: str, request: PluginUpdate, _=Depends(verify_api_key)):
    """Update a plugin; only the non-None fields of the request are applied."""
    if not PLUGIN_MANAGER_AVAILABLE:
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    manager = get_plugin_manager_instance()
    if manager is None:
        # Manager construction also requires the database; avoid AttributeError.
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    # Drop unset (None) fields so they are not overwritten
    update_data = {k: v for k, v in request.dict().items() if v is not None}
    updated = manager.update_plugin(plugin_id, **update_data)

    if not updated:
        raise HTTPException(status_code=404, detail="Plugin not found")

    return PluginResponse(
        id=updated.id,
        name=updated.name,
        plugin_type=updated.plugin_type,
        project_id=updated.project_id,
        status=updated.status,
        config=updated.config,
        created_at=updated.created_at,
        updated_at=updated.updated_at,
        last_used_at=updated.last_used_at,
        use_count=updated.use_count
    )
|
||
|
||
|
||
@app.delete("/api/v1/plugins/{plugin_id}", tags=["Plugins"])
async def delete_plugin_endpoint(plugin_id: str, _=Depends(verify_api_key)):
    """Delete a plugin by id."""
    if not PLUGIN_MANAGER_AVAILABLE:
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    manager = get_plugin_manager_instance()
    if manager is None:
        # Manager construction also requires the database; avoid AttributeError.
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    success = manager.delete_plugin(plugin_id)

    if not success:
        raise HTTPException(status_code=404, detail="Plugin not found")

    return {"success": True, "message": "Plugin deleted successfully"}
|
||
|
||
|
||
# ==================== Phase 7 Task 7: Chrome Extension Endpoints ====================
|
||
|
||
@app.post("/api/v1/plugins/chrome/tokens", response_model=ChromeExtensionTokenResponse, tags=["Chrome Extension"])
async def create_chrome_token_endpoint(request: ChromeExtensionTokenCreate, _=Depends(verify_api_key)):
    """
    Create a Chrome-extension token.

    Used by the Chrome extension for authentication and authorization.
    """
    if not PLUGIN_MANAGER_AVAILABLE:
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    manager = get_plugin_manager_instance()
    if manager is None:
        # Manager construction also requires the database; avoid AttributeError.
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    handler = manager.get_handler(PluginType.CHROME_EXTENSION)

    if not handler:
        raise HTTPException(status_code=503, detail="Chrome extension handler not available")

    token = handler.create_token(
        name=request.name,
        project_id=request.project_id,
        permissions=request.permissions,
        expires_days=request.expires_days
    )

    return ChromeExtensionTokenResponse(
        id=token.id,
        token=token.token,
        name=token.name,
        project_id=token.project_id,
        permissions=token.permissions,
        expires_at=token.expires_at,
        created_at=token.created_at
    )
|
||
|
||
|
||
@app.get("/api/v1/plugins/chrome/tokens", tags=["Chrome Extension"])
async def list_chrome_tokens_endpoint(
    project_id: Optional[str] = None,
    _=Depends(verify_api_key)
):
    """List Chrome-extension tokens (raw token values are never returned)."""
    if not PLUGIN_MANAGER_AVAILABLE:
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    manager = get_plugin_manager_instance()
    if manager is None:
        # Manager construction also requires the database; avoid AttributeError.
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    handler = manager.get_handler(PluginType.CHROME_EXTENSION)

    if not handler:
        raise HTTPException(status_code=503, detail="Chrome extension handler not available")

    tokens = handler.list_tokens(project_id=project_id)

    return {
        "tokens": [
            {
                "id": t.id,
                "name": t.name,
                "project_id": t.project_id,
                "permissions": t.permissions,
                "expires_at": t.expires_at,
                "created_at": t.created_at,
                "last_used_at": t.last_used_at,
                "use_count": t.use_count,
                "is_revoked": t.is_revoked
            }
            for t in tokens
        ],
        "total": len(tokens)
    }
|
||
|
||
|
||
@app.delete("/api/v1/plugins/chrome/tokens/{token_id}", tags=["Chrome Extension"])
async def revoke_chrome_token_endpoint(token_id: str, _=Depends(verify_api_key)):
    """Revoke a Chrome-extension token."""
    if not PLUGIN_MANAGER_AVAILABLE:
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    manager = get_plugin_manager_instance()
    if manager is None:
        # Manager construction also requires the database; avoid AttributeError.
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    handler = manager.get_handler(PluginType.CHROME_EXTENSION)

    if not handler:
        raise HTTPException(status_code=503, detail="Chrome extension handler not available")

    success = handler.revoke_token(token_id)

    if not success:
        raise HTTPException(status_code=404, detail="Token not found")

    return {"success": True, "message": "Token revoked successfully"}
|
||
|
||
|
||
@app.post("/api/v1/plugins/chrome/import", tags=["Chrome Extension"])
async def chrome_import_webpage_endpoint(request: ChromeExtensionImportRequest):
    """
    Import webpage content sent by the Chrome extension.

    No API key required — authenticated via the Chrome-extension token.
    """
    if not PLUGIN_MANAGER_AVAILABLE:
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    manager = get_plugin_manager_instance()
    if manager is None:
        # Manager construction also requires the database; avoid AttributeError.
        raise HTTPException(status_code=503, detail="Plugin manager not available")

    handler = manager.get_handler(PluginType.CHROME_EXTENSION)

    if not handler:
        raise HTTPException(status_code=503, detail="Chrome extension handler not available")

    # Validate the extension token before accepting any content
    token = handler.validate_token(request.token)
    if not token:
        raise HTTPException(status_code=401, detail="Invalid or expired token")

    # Hand the page off to the import pipeline
    result = await handler.import_webpage(
        token=token,
        url=request.url,
        title=request.title,
        content=request.content,
        html_content=request.html_content
    )

    if not result["success"]:
        raise HTTPException(status_code=400, detail=result.get("error", "Import failed"))

    return result
|
||
|
||
|
||
# ==================== Phase 7 Task 7: Bot Endpoints ====================
|
||
|
||
@app.post("/api/v1/plugins/bot/feishu/sessions", response_model=BotSessionResponse, tags=["Bot"])
|
||
async def create_feishu_session_endpoint(request: BotSessionCreate, _=Depends(verify_api_key)):
|
||
"""创建飞书机器人会话"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
handler = manager.get_handler(PluginType.FEISHU_BOT)
|
||
|
||
if not handler:
|
||
raise HTTPException(status_code=503, detail="Feishu bot handler not available")
|
||
|
||
session = handler.create_session(
|
||
session_id=request.session_id,
|
||
session_name=request.session_name,
|
||
project_id=request.project_id,
|
||
webhook_url=request.webhook_url,
|
||
secret=request.secret
|
||
)
|
||
|
||
return BotSessionResponse(
|
||
id=session.id,
|
||
bot_type=session.bot_type,
|
||
session_id=session.session_id,
|
||
session_name=session.session_name,
|
||
project_id=session.project_id,
|
||
webhook_url=session.webhook_url,
|
||
is_active=session.is_active,
|
||
created_at=session.created_at,
|
||
last_message_at=session.last_message_at,
|
||
message_count=session.message_count
|
||
)
|
||
|
||
|
||
@app.post("/api/v1/plugins/bot/dingtalk/sessions", response_model=BotSessionResponse, tags=["Bot"])
|
||
async def create_dingtalk_session_endpoint(request: BotSessionCreate, _=Depends(verify_api_key)):
|
||
"""创建钉钉机器人会话"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
handler = manager.get_handler(PluginType.DINGTALK_BOT)
|
||
|
||
if not handler:
|
||
raise HTTPException(status_code=503, detail="DingTalk bot handler not available")
|
||
|
||
session = handler.create_session(
|
||
session_id=request.session_id,
|
||
session_name=request.session_name,
|
||
project_id=request.project_id,
|
||
webhook_url=request.webhook_url,
|
||
secret=request.secret
|
||
)
|
||
|
||
return BotSessionResponse(
|
||
id=session.id,
|
||
bot_type=session.bot_type,
|
||
session_id=session.session_id,
|
||
session_name=session.session_name,
|
||
project_id=session.project_id,
|
||
webhook_url=session.webhook_url,
|
||
is_active=session.is_active,
|
||
created_at=session.created_at,
|
||
last_message_at=session.last_message_at,
|
||
message_count=session.message_count
|
||
)
|
||
|
||
|
||
@app.get("/api/v1/plugins/bot/{bot_type}/sessions", tags=["Bot"])
|
||
async def list_bot_sessions_endpoint(
|
||
bot_type: str,
|
||
project_id: Optional[str] = None,
|
||
_=Depends(verify_api_key)
|
||
):
|
||
"""列出机器人会话"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
|
||
if bot_type == "feishu":
|
||
handler = manager.get_handler(PluginType.FEISHU_BOT)
|
||
elif bot_type == "dingtalk":
|
||
handler = manager.get_handler(PluginType.DINGTALK_BOT)
|
||
else:
|
||
raise HTTPException(status_code=400, detail="Invalid bot type. Must be feishu or dingtalk")
|
||
|
||
if not handler:
|
||
raise HTTPException(status_code=503, detail=f"{bot_type} bot handler not available")
|
||
|
||
sessions = handler.list_sessions(project_id=project_id)
|
||
|
||
return {
|
||
"sessions": [
|
||
{
|
||
"id": s.id,
|
||
"bot_type": s.bot_type,
|
||
"session_id": s.session_id,
|
||
"session_name": s.session_name,
|
||
"project_id": s.project_id,
|
||
"is_active": s.is_active,
|
||
"created_at": s.created_at,
|
||
"last_message_at": s.last_message_at,
|
||
"message_count": s.message_count
|
||
}
|
||
for s in sessions
|
||
],
|
||
"total": len(sessions)
|
||
}
|
||
|
||
|
||
@app.post("/api/v1/plugins/bot/{bot_type}/webhook", tags=["Bot"])
|
||
async def bot_webhook_endpoint(bot_type: str, request: Request):
|
||
"""
|
||
机器人 Webhook 接收端点
|
||
|
||
接收飞书/钉钉机器人的消息
|
||
"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
|
||
if bot_type == "feishu":
|
||
handler = manager.get_handler(PluginType.FEISHU_BOT)
|
||
elif bot_type == "dingtalk":
|
||
handler = manager.get_handler(PluginType.DINGTALK_BOT)
|
||
else:
|
||
raise HTTPException(status_code=400, detail="Invalid bot type")
|
||
|
||
if not handler:
|
||
raise HTTPException(status_code=503, detail=f"{bot_type} bot handler not available")
|
||
|
||
# 获取消息内容
|
||
message = await request.json()
|
||
|
||
# 获取会话ID(飞书和钉钉的格式不同)
|
||
if bot_type == "feishu":
|
||
session_id = message.get('chat_id') or message.get('open_chat_id')
|
||
else: # dingtalk
|
||
session_id = message.get('conversationId') or message.get('senderStaffId')
|
||
|
||
if not session_id:
|
||
raise HTTPException(status_code=400, detail="Cannot identify session")
|
||
|
||
# 获取会话
|
||
session = handler.get_session(session_id)
|
||
if not session:
|
||
# 自动创建会话
|
||
session = handler.create_session(
|
||
session_id=session_id,
|
||
session_name=f"Auto-{session_id[:8]}",
|
||
webhook_url=""
|
||
)
|
||
|
||
# 处理消息
|
||
result = await handler.handle_message(session, message)
|
||
|
||
# 如果配置了 webhook,发送回复
|
||
if session.webhook_url and result.get("response"):
|
||
await handler.send_message(session, result["response"])
|
||
|
||
return result
|
||
|
||
|
||
@app.post("/api/v1/plugins/bot/{bot_type}/sessions/{session_id}/send", tags=["Bot"])
|
||
async def send_bot_message_endpoint(
|
||
bot_type: str,
|
||
session_id: str,
|
||
message: str,
|
||
_=Depends(verify_api_key)
|
||
):
|
||
"""发送消息到机器人会话"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
|
||
if bot_type == "feishu":
|
||
handler = manager.get_handler(PluginType.FEISHU_BOT)
|
||
elif bot_type == "dingtalk":
|
||
handler = manager.get_handler(PluginType.DINGTALK_BOT)
|
||
else:
|
||
raise HTTPException(status_code=400, detail="Invalid bot type")
|
||
|
||
if not handler:
|
||
raise HTTPException(status_code=503, detail=f"{bot_type} bot handler not available")
|
||
|
||
session = handler.get_session(session_id)
|
||
if not session:
|
||
raise HTTPException(status_code=404, detail="Session not found")
|
||
|
||
success = await handler.send_message(session, message)
|
||
|
||
return {"success": success, "message": "Message sent" if success else "Failed to send message"}
|
||
|
||
|
||
# ==================== Phase 7 Task 7: Integration Endpoints ====================
|
||
|
||
@app.post("/api/v1/plugins/integrations/zapier", response_model=WebhookEndpointResponse, tags=["Integrations"])
|
||
async def create_zapier_endpoint(request: WebhookEndpointCreate, _=Depends(verify_api_key)):
|
||
"""创建 Zapier Webhook 端点"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
handler = manager.get_handler(PluginType.ZAPIER)
|
||
|
||
if not handler:
|
||
raise HTTPException(status_code=503, detail="Zapier handler not available")
|
||
|
||
endpoint = handler.create_endpoint(
|
||
name=request.name,
|
||
endpoint_url=request.endpoint_url,
|
||
project_id=request.project_id,
|
||
auth_type=request.auth_type,
|
||
auth_config=request.auth_config,
|
||
trigger_events=request.trigger_events
|
||
)
|
||
|
||
return WebhookEndpointResponse(
|
||
id=endpoint.id,
|
||
name=endpoint.name,
|
||
endpoint_type=endpoint.endpoint_type,
|
||
endpoint_url=endpoint.endpoint_url,
|
||
project_id=endpoint.project_id,
|
||
auth_type=endpoint.auth_type,
|
||
trigger_events=endpoint.trigger_events,
|
||
is_active=endpoint.is_active,
|
||
created_at=endpoint.created_at,
|
||
last_triggered_at=endpoint.last_triggered_at,
|
||
trigger_count=endpoint.trigger_count
|
||
)
|
||
|
||
|
||
@app.post("/api/v1/plugins/integrations/make", response_model=WebhookEndpointResponse, tags=["Integrations"])
|
||
async def create_make_endpoint(request: WebhookEndpointCreate, _=Depends(verify_api_key)):
|
||
"""创建 Make (Integromat) Webhook 端点"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
handler = manager.get_handler(PluginType.MAKE)
|
||
|
||
if not handler:
|
||
raise HTTPException(status_code=503, detail="Make handler not available")
|
||
|
||
endpoint = handler.create_endpoint(
|
||
name=request.name,
|
||
endpoint_url=request.endpoint_url,
|
||
project_id=request.project_id,
|
||
auth_type=request.auth_type,
|
||
auth_config=request.auth_config,
|
||
trigger_events=request.trigger_events
|
||
)
|
||
|
||
return WebhookEndpointResponse(
|
||
id=endpoint.id,
|
||
name=endpoint.name,
|
||
endpoint_type=endpoint.endpoint_type,
|
||
endpoint_url=endpoint.endpoint_url,
|
||
project_id=endpoint.project_id,
|
||
auth_type=endpoint.auth_type,
|
||
trigger_events=endpoint.trigger_events,
|
||
is_active=endpoint.is_active,
|
||
created_at=endpoint.created_at,
|
||
last_triggered_at=endpoint.last_triggered_at,
|
||
trigger_count=endpoint.trigger_count
|
||
)
|
||
|
||
|
||
@app.get("/api/v1/plugins/integrations/{endpoint_type}", tags=["Integrations"])
|
||
async def list_integration_endpoints_endpoint(
|
||
endpoint_type: str,
|
||
project_id: Optional[str] = None,
|
||
_=Depends(verify_api_key)
|
||
):
|
||
"""列出集成端点"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
|
||
if endpoint_type == "zapier":
|
||
handler = manager.get_handler(PluginType.ZAPIER)
|
||
elif endpoint_type == "make":
|
||
handler = manager.get_handler(PluginType.MAKE)
|
||
else:
|
||
raise HTTPException(status_code=400, detail="Invalid endpoint type")
|
||
|
||
if not handler:
|
||
raise HTTPException(status_code=503, detail=f"{endpoint_type} handler not available")
|
||
|
||
endpoints = handler.list_endpoints(project_id=project_id)
|
||
|
||
return {
|
||
"endpoints": [
|
||
{
|
||
"id": e.id,
|
||
"name": e.name,
|
||
"endpoint_type": e.endpoint_type,
|
||
"endpoint_url": e.endpoint_url,
|
||
"project_id": e.project_id,
|
||
"auth_type": e.auth_type,
|
||
"trigger_events": e.trigger_events,
|
||
"is_active": e.is_active,
|
||
"created_at": e.created_at,
|
||
"last_triggered_at": e.last_triggered_at,
|
||
"trigger_count": e.trigger_count
|
||
}
|
||
for e in endpoints
|
||
],
|
||
"total": len(endpoints)
|
||
}
|
||
|
||
|
||
@app.post("/api/v1/plugins/integrations/{endpoint_id}/test", response_model=WebhookTestResponse, tags=["Integrations"])
|
||
async def test_integration_endpoint(endpoint_id: str, _=Depends(verify_api_key)):
|
||
"""测试集成端点"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
|
||
# 尝试获取端点(可能是 Zapier 或 Make)
|
||
handler = manager.get_handler(PluginType.ZAPIER)
|
||
endpoint = handler.get_endpoint(endpoint_id) if handler else None
|
||
|
||
if not endpoint:
|
||
handler = manager.get_handler(PluginType.MAKE)
|
||
endpoint = handler.get_endpoint(endpoint_id) if handler else None
|
||
|
||
if not endpoint:
|
||
raise HTTPException(status_code=404, detail="Endpoint not found")
|
||
|
||
result = await handler.test_endpoint(endpoint)
|
||
|
||
return WebhookTestResponse(
|
||
success=result["success"],
|
||
endpoint_id=endpoint_id,
|
||
message=result["message"]
|
||
)
|
||
|
||
|
||
@app.post("/api/v1/plugins/integrations/{endpoint_id}/trigger", tags=["Integrations"])
|
||
async def trigger_integration_endpoint(
|
||
endpoint_id: str,
|
||
event_type: str,
|
||
data: Dict,
|
||
_=Depends(verify_api_key)
|
||
):
|
||
"""手动触发集成端点"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
|
||
# 尝试获取端点(可能是 Zapier 或 Make)
|
||
handler = manager.get_handler(PluginType.ZAPIER)
|
||
endpoint = handler.get_endpoint(endpoint_id) if handler else None
|
||
|
||
if not endpoint:
|
||
handler = manager.get_handler(PluginType.MAKE)
|
||
endpoint = handler.get_endpoint(endpoint_id) if handler else None
|
||
|
||
if not endpoint:
|
||
raise HTTPException(status_code=404, detail="Endpoint not found")
|
||
|
||
success = await handler.trigger(endpoint, event_type, data)
|
||
|
||
return {"success": success, "message": "Triggered successfully" if success else "Trigger failed"}
|
||
|
||
|
||
# ==================== Phase 7 Task 7: WebDAV Endpoints ====================
|
||
|
||
@app.post("/api/v1/plugins/webdav", response_model=WebDAVSyncResponse, tags=["WebDAV"])
|
||
async def create_webdav_sync_endpoint(request: WebDAVSyncCreate, _=Depends(verify_api_key)):
|
||
"""
|
||
创建 WebDAV 同步配置
|
||
|
||
支持与坚果云等 WebDAV 网盘同步项目数据
|
||
"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
handler = manager.get_handler(PluginType.WEBDAV)
|
||
|
||
if not handler:
|
||
raise HTTPException(status_code=503, detail="WebDAV handler not available")
|
||
|
||
sync = handler.create_sync(
|
||
name=request.name,
|
||
project_id=request.project_id,
|
||
server_url=request.server_url,
|
||
username=request.username,
|
||
password=request.password,
|
||
remote_path=request.remote_path,
|
||
sync_mode=request.sync_mode,
|
||
sync_interval=request.sync_interval
|
||
)
|
||
|
||
return WebDAVSyncResponse(
|
||
id=sync.id,
|
||
name=sync.name,
|
||
project_id=sync.project_id,
|
||
server_url=sync.server_url,
|
||
username=sync.username,
|
||
remote_path=sync.remote_path,
|
||
sync_mode=sync.sync_mode,
|
||
sync_interval=sync.sync_interval,
|
||
last_sync_at=sync.last_sync_at,
|
||
last_sync_status=sync.last_sync_status,
|
||
is_active=sync.is_active,
|
||
created_at=sync.created_at,
|
||
sync_count=sync.sync_count
|
||
)
|
||
|
||
|
||
@app.get("/api/v1/plugins/webdav", tags=["WebDAV"])
|
||
async def list_webdav_syncs_endpoint(
|
||
project_id: Optional[str] = None,
|
||
_=Depends(verify_api_key)
|
||
):
|
||
"""列出 WebDAV 同步配置"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
handler = manager.get_handler(PluginType.WEBDAV)
|
||
|
||
if not handler:
|
||
raise HTTPException(status_code=503, detail="WebDAV handler not available")
|
||
|
||
syncs = handler.list_syncs(project_id=project_id)
|
||
|
||
return {
|
||
"syncs": [
|
||
{
|
||
"id": s.id,
|
||
"name": s.name,
|
||
"project_id": s.project_id,
|
||
"server_url": s.server_url,
|
||
"username": s.username,
|
||
"remote_path": s.remote_path,
|
||
"sync_mode": s.sync_mode,
|
||
"sync_interval": s.sync_interval,
|
||
"last_sync_at": s.last_sync_at,
|
||
"last_sync_status": s.last_sync_status,
|
||
"is_active": s.is_active,
|
||
"created_at": s.created_at,
|
||
"sync_count": s.sync_count
|
||
}
|
||
for s in syncs
|
||
],
|
||
"total": len(syncs)
|
||
}
|
||
|
||
|
||
@app.post("/api/v1/plugins/webdav/{sync_id}/test", response_model=WebDAVTestResponse, tags=["WebDAV"])
|
||
async def test_webdav_connection_endpoint(sync_id: str, _=Depends(verify_api_key)):
|
||
"""测试 WebDAV 连接"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
handler = manager.get_handler(PluginType.WEBDAV)
|
||
|
||
if not handler:
|
||
raise HTTPException(status_code=503, detail="WebDAV handler not available")
|
||
|
||
sync = handler.get_sync(sync_id)
|
||
if not sync:
|
||
raise HTTPException(status_code=404, detail="Sync configuration not found")
|
||
|
||
result = await handler.test_connection(sync)
|
||
|
||
return WebDAVTestResponse(
|
||
success=result["success"],
|
||
message=result.get("message") or result.get("error", "Unknown result")
|
||
)
|
||
|
||
|
||
@app.post("/api/v1/plugins/webdav/{sync_id}/sync", response_model=WebDAVSyncResult, tags=["WebDAV"])
|
||
async def sync_webdav_endpoint(sync_id: str, _=Depends(verify_api_key)):
|
||
"""执行 WebDAV 同步"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
handler = manager.get_handler(PluginType.WEBDAV)
|
||
|
||
if not handler:
|
||
raise HTTPException(status_code=503, detail="WebDAV handler not available")
|
||
|
||
sync = handler.get_sync(sync_id)
|
||
if not sync:
|
||
raise HTTPException(status_code=404, detail="Sync configuration not found")
|
||
|
||
result = await handler.sync_project(sync)
|
||
|
||
return WebDAVSyncResult(
|
||
success=result["success"],
|
||
message=result.get("message") or result.get("error", "Sync completed"),
|
||
entities_count=result.get("entities_count"),
|
||
relations_count=result.get("relations_count"),
|
||
remote_path=result.get("remote_path"),
|
||
error=result.get("error")
|
||
)
|
||
|
||
|
||
@app.delete("/api/v1/plugins/webdav/{sync_id}", tags=["WebDAV"])
|
||
async def delete_webdav_sync_endpoint(sync_id: str, _=Depends(verify_api_key)):
|
||
"""删除 WebDAV 同步配置"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager_instance()
|
||
handler = manager.get_handler(PluginType.WEBDAV)
|
||
|
||
if not handler:
|
||
raise HTTPException(status_code=503, detail="WebDAV handler not available")
|
||
|
||
success = handler.delete_sync(sync_id)
|
||
|
||
if not success:
|
||
raise HTTPException(status_code=404, detail="Sync configuration not found")
|
||
|
||
return {"success": True, "message": "WebDAV sync configuration deleted"}
|
||
|
||
|
||
@app.get("/api/v1/openapi.json", include_in_schema=False)
|
||
async def get_openapi():
|
||
"""获取 OpenAPI 规范"""
|
||
from fastapi.openapi.utils import get_openapi
|
||
return get_openapi(
|
||
title=app.title,
|
||
version=app.version,
|
||
description=app.description,
|
||
routes=app.routes,
|
||
tags=app.openapi_tags
|
||
)
|
||
|
||
|
||
# Serve frontend - MUST be last to not override API routes
# NOTE(review): more routes and models are registered BELOW this mount later in
# this file. Starlette matches in registration order, so a catch-all mount at
# "/" added first may shadow routes registered afterwards, and when run as a
# script uvicorn.run() starts before those later definitions execute — confirm
# this ordering is intentional.
app.mount("/", StaticFiles(directory="frontend", html=True), name="frontend")

if __name__ == "__main__":
    import uvicorn
    # Development entry point: bind on all interfaces, default port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)
|
||
|
||
class PluginCreateRequest(BaseModel):
    # Request payload for POST /api/v1/plugins.
    name: str
    plugin_type: str
    project_id: Optional[str] = None
    # Use default_factory instead of a literal {} so the declared default is
    # never a single shared mutable object (best practice for dict defaults).
    config: Optional[Dict] = Field(default_factory=dict)
|
||
|
||
|
||
class PluginResponse(BaseModel):
    # Public representation of a registered plugin.
    # NOTE(review): api_key is returned in full here — confirm it is safe to
    # expose on every read, not only at creation time.
    id: str
    name: str
    plugin_type: str
    project_id: Optional[str]
    status: str
    api_key: str
    created_at: str
|
||
|
||
|
||
class BotSessionResponse(BaseModel):
    # Bot chat session representation (plugin-manager flavour).
    # NOTE(review): this REDEFINES BotSessionResponse with a different field
    # set than the model used by the earlier /api/v1/plugins/bot/* endpoints
    # (those reference bot_type/session_name/webhook_url/is_active). Later
    # definitions shadow earlier ones at import time — confirm which schema
    # each endpoint is meant to serve.
    id: str
    plugin_id: str
    platform: str
    session_id: str
    user_id: Optional[str]
    user_name: Optional[str]
    project_id: Optional[str]
    message_count: int
    created_at: str
    last_message_at: Optional[str]
|
||
|
||
|
||
class WebhookEndpointResponse(BaseModel):
    # Inbound webhook endpoint representation (plugin-manager flavour).
    # NOTE(review): this REDEFINES WebhookEndpointResponse with different
    # fields than the model the earlier /api/v1/plugins/integrations/*
    # endpoints construct (endpoint_url/auth_type/trigger_events/...).
    # Confirm which schema is canonical.
    id: str
    plugin_id: str
    name: str
    endpoint_path: str
    endpoint_type: str
    target_project_id: Optional[str]
    is_active: bool
    trigger_count: int
    created_at: str
|
||
|
||
|
||
class WebDAVSyncResponse(BaseModel):
    # WebDAV sync configuration representation (plugin-manager flavour).
    # NOTE(review): this REDEFINES WebDAVSyncResponse with different fields
    # than the model the earlier /api/v1/plugins/webdav endpoints construct
    # (project_id/sync_interval/last_sync_status/sync_count). Confirm which
    # schema is canonical. Password is intentionally absent.
    id: str
    plugin_id: str
    name: str
    server_url: str
    username: str
    remote_path: str
    local_path: str
    sync_direction: str
    sync_mode: str
    auto_analyze: bool
    is_active: bool
    last_sync_at: Optional[str]
    created_at: str
|
||
|
||
|
||
class ChromeClipRequest(BaseModel):
    # Payload sent by the Chrome extension when clipping a page.
    url: str
    title: str
    content: str
    content_type: str = "page"
    # Use default_factory instead of a literal {} so the declared default is
    # never a single shared mutable object (best practice for dict defaults).
    meta: Optional[Dict] = Field(default_factory=dict)
    project_id: Optional[str] = None
|
||
|
||
|
||
class ChromeClipResponse(BaseModel):
    # Result returned to the Chrome extension after a clip is stored.
    clip_id: str
    project_id: str
    url: str
    title: str
    status: str
    message: str
|
||
|
||
|
||
class BotMessageRequest(BaseModel):
    # Normalized inbound bot message (feishu/dingtalk/slack).
    platform: str
    session_id: str
    user_id: Optional[str] = None
    user_name: Optional[str] = None
    message_type: str
    content: str
    project_id: Optional[str] = None
|
||
|
||
|
||
class BotMessageResponse(BaseModel):
    # Reply returned to the bot platform after handling a message.
    success: bool
    reply: Optional[str] = None
    session_id: str
    action: Optional[str] = None
|
||
|
||
|
||
class WebhookPayload(BaseModel):
    # Generic event envelope: event name plus arbitrary data payload.
    event: str
    data: Dict
|
||
|
||
|
||
@app.post("/api/v1/plugins", response_model=PluginResponse, tags=["Plugins"])
|
||
async def create_plugin(
|
||
request: PluginCreateRequest,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""创建插件"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
plugin = manager.create_plugin(
|
||
name=request.name,
|
||
plugin_type=request.plugin_type,
|
||
project_id=request.project_id,
|
||
config=request.config
|
||
)
|
||
|
||
return PluginResponse(
|
||
id=plugin.id,
|
||
name=plugin.name,
|
||
plugin_type=plugin.plugin_type,
|
||
project_id=plugin.project_id,
|
||
status=plugin.status,
|
||
api_key=plugin.api_key,
|
||
created_at=plugin.created_at
|
||
)
|
||
|
||
|
||
@app.get("/api/v1/plugins", tags=["Plugins"])
|
||
async def list_plugins(
|
||
project_id: Optional[str] = None,
|
||
plugin_type: Optional[str] = None,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""列出插件"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
plugins = manager.list_plugins(project_id=project_id, plugin_type=plugin_type)
|
||
|
||
return {
|
||
"plugins": [
|
||
{
|
||
"id": p.id,
|
||
"name": p.name,
|
||
"plugin_type": p.plugin_type,
|
||
"project_id": p.project_id,
|
||
"status": p.status,
|
||
"use_count": p.use_count,
|
||
"created_at": p.created_at
|
||
}
|
||
for p in plugins
|
||
]
|
||
}
|
||
|
||
|
||
@app.get("/api/v1/plugins/{plugin_id}", response_model=PluginResponse, tags=["Plugins"])
|
||
async def get_plugin(
|
||
plugin_id: str,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""获取插件详情"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
plugin = manager.get_plugin(plugin_id)
|
||
|
||
if not plugin:
|
||
raise HTTPException(status_code=404, detail="Plugin not found")
|
||
|
||
return PluginResponse(
|
||
id=plugin.id,
|
||
name=plugin.name,
|
||
plugin_type=plugin.plugin_type,
|
||
project_id=plugin.project_id,
|
||
status=plugin.status,
|
||
api_key=plugin.api_key,
|
||
created_at=plugin.created_at
|
||
)
|
||
|
||
|
||
@app.delete("/api/v1/plugins/{plugin_id}", tags=["Plugins"])
|
||
async def delete_plugin(
|
||
plugin_id: str,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""删除插件"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
manager.delete_plugin(plugin_id)
|
||
|
||
return {"success": True, "message": "Plugin deleted"}
|
||
|
||
|
||
@app.post("/api/v1/plugins/{plugin_id}/regenerate-key", tags=["Plugins"])
|
||
async def regenerate_plugin_key(
|
||
plugin_id: str,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""重新生成插件 API Key"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
new_key = manager.regenerate_api_key(plugin_id)
|
||
|
||
return {"success": True, "api_key": new_key}
|
||
|
||
|
||
# ==================== Chrome Extension API ====================
|
||
|
||
@app.post("/api/v1/plugins/chrome/clip", response_model=ChromeClipResponse, tags=["Chrome Extension"])
|
||
async def chrome_clip(
|
||
request: ChromeClipRequest,
|
||
x_api_key: Optional[str] = Header(None, alias="X-API-Key")
|
||
):
|
||
"""Chrome 插件保存网页内容"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
if not x_api_key:
|
||
raise HTTPException(status_code=401, detail="API Key required")
|
||
|
||
manager = get_plugin_manager()
|
||
plugin = manager.get_plugin_by_api_key(x_api_key)
|
||
|
||
if not plugin or plugin.plugin_type != "chrome_extension":
|
||
raise HTTPException(status_code=401, detail="Invalid API Key")
|
||
|
||
# 确定目标项目
|
||
project_id = request.project_id or plugin.project_id
|
||
if not project_id:
|
||
raise HTTPException(status_code=400, detail="Project ID required")
|
||
|
||
# 创建转录记录(将网页内容作为文档处理)
|
||
db = get_db_manager()
|
||
|
||
# 生成文档内容
|
||
doc_content = f"""# {request.title}
|
||
|
||
URL: {request.url}
|
||
|
||
## 内容
|
||
|
||
{request.content}
|
||
|
||
## 元数据
|
||
|
||
{json.dumps(request.meta, ensure_ascii=False, indent=2)}
|
||
"""
|
||
|
||
# 创建转录记录
|
||
transcript_id = db.create_transcript(
|
||
project_id=project_id,
|
||
filename=f"clip_{request.title[:50]}.md",
|
||
full_text=doc_content,
|
||
transcript_type="document"
|
||
)
|
||
|
||
# 记录活动
|
||
manager.log_activity(
|
||
plugin_id=plugin.id,
|
||
activity_type="clip",
|
||
source="chrome_extension",
|
||
details={
|
||
"url": request.url,
|
||
"title": request.title,
|
||
"project_id": project_id,
|
||
"transcript_id": transcript_id
|
||
}
|
||
)
|
||
|
||
return ChromeClipResponse(
|
||
clip_id=str(uuid.uuid4()),
|
||
project_id=project_id,
|
||
url=request.url,
|
||
title=request.title,
|
||
status="success",
|
||
message="Content saved successfully"
|
||
)
|
||
|
||
|
||
# ==================== Bot API ====================
|
||
|
||
@app.post("/api/v1/bots/webhook/{platform}", response_model=BotMessageResponse, tags=["Bot"])
|
||
async def bot_webhook(
|
||
platform: str,
|
||
request: Request,
|
||
x_signature: Optional[str] = Header(None, alias="X-Signature")
|
||
):
|
||
"""接收机器人 Webhook 消息(飞书/钉钉/Slack)"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
body = await request.body()
|
||
payload = json.loads(body)
|
||
|
||
manager = get_plugin_manager()
|
||
handler = BotHandler(manager)
|
||
|
||
# 解析消息
|
||
if platform == "feishu":
|
||
message = handler.parse_feishu_message(payload)
|
||
elif platform == "dingtalk":
|
||
message = handler.parse_dingtalk_message(payload)
|
||
elif platform == "slack":
|
||
message = handler.parse_slack_message(payload)
|
||
else:
|
||
raise HTTPException(status_code=400, detail=f"Unsupported platform: {platform}")
|
||
|
||
# 查找或创建会话
|
||
# 这里简化处理,实际应该根据 plugin_id 查找
|
||
# 暂时返回简单的回复
|
||
|
||
return BotMessageResponse(
|
||
success=True,
|
||
reply="收到消息!请使用 InsightFlow 控制台查看更多功能。",
|
||
session_id=message.get("session_id", ""),
|
||
action="reply"
|
||
)
|
||
|
||
|
||
@app.get("/api/v1/bots/sessions", response_model=List[BotSessionResponse], tags=["Bot"])
|
||
async def list_bot_sessions(
|
||
plugin_id: Optional[str] = None,
|
||
project_id: Optional[str] = None,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""列出机器人会话"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
sessions = manager.list_bot_sessions(plugin_id=plugin_id, project_id=project_id)
|
||
|
||
return [
|
||
BotSessionResponse(
|
||
id=s.id,
|
||
plugin_id=s.plugin_id,
|
||
platform=s.platform,
|
||
session_id=s.session_id,
|
||
user_id=s.user_id,
|
||
user_name=s.user_name,
|
||
project_id=s.project_id,
|
||
message_count=s.message_count,
|
||
created_at=s.created_at,
|
||
last_message_at=s.last_message_at
|
||
)
|
||
for s in sessions
|
||
]
|
||
|
||
|
||
# ==================== Webhook Integration API ====================
|
||
|
||
@app.post("/api/v1/webhook-endpoints", response_model=WebhookEndpointResponse, tags=["Integrations"])
|
||
async def create_webhook_endpoint(
|
||
plugin_id: str,
|
||
name: str,
|
||
endpoint_type: str,
|
||
target_project_id: Optional[str] = None,
|
||
allowed_events: Optional[List[str]] = None,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""创建 Webhook 端点(用于 Zapier/Make 集成)"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
endpoint = manager.create_webhook_endpoint(
|
||
plugin_id=plugin_id,
|
||
name=name,
|
||
endpoint_type=endpoint_type,
|
||
target_project_id=target_project_id,
|
||
allowed_events=allowed_events
|
||
)
|
||
|
||
return WebhookEndpointResponse(
|
||
id=endpoint.id,
|
||
plugin_id=endpoint.plugin_id,
|
||
name=endpoint.name,
|
||
endpoint_path=endpoint.endpoint_path,
|
||
endpoint_type=endpoint.endpoint_type,
|
||
target_project_id=endpoint.target_project_id,
|
||
is_active=endpoint.is_active,
|
||
trigger_count=endpoint.trigger_count,
|
||
created_at=endpoint.created_at
|
||
)
|
||
|
||
|
||
@app.get("/api/v1/webhook-endpoints", response_model=List[WebhookEndpointResponse], tags=["Integrations"])
|
||
async def list_webhook_endpoints(
|
||
plugin_id: Optional[str] = None,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""列出 Webhook 端点"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
endpoints = manager.list_webhook_endpoints(plugin_id=plugin_id)
|
||
|
||
return [
|
||
WebhookEndpointResponse(
|
||
id=e.id,
|
||
plugin_id=e.plugin_id,
|
||
name=e.name,
|
||
endpoint_path=e.endpoint_path,
|
||
endpoint_type=e.endpoint_type,
|
||
target_project_id=e.target_project_id,
|
||
is_active=e.is_active,
|
||
trigger_count=e.trigger_count,
|
||
created_at=e.created_at
|
||
)
|
||
for e in endpoints
|
||
]
|
||
|
||
|
||
@app.post("/webhook/{endpoint_type}/{token}", tags=["Integrations"])
|
||
async def receive_webhook(
|
||
endpoint_type: str,
|
||
token: str,
|
||
request: Request,
|
||
x_signature: Optional[str] = Header(None, alias="X-Signature")
|
||
):
|
||
"""接收外部 Webhook 调用(Zapier/Make/Custom)"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
|
||
# 构建完整路径查找端点
|
||
path = f"/webhook/{endpoint_type}/{token}"
|
||
endpoint = manager.get_webhook_endpoint_by_path(path)
|
||
|
||
if not endpoint or not endpoint.is_active:
|
||
raise HTTPException(status_code=404, detail="Webhook endpoint not found")
|
||
|
||
# 验证签名(如果有)
|
||
if endpoint.secret and x_signature:
|
||
body = await request.body()
|
||
integration = WebhookIntegration(manager)
|
||
if not integration.validate_signature(body, x_signature, endpoint.secret):
|
||
raise HTTPException(status_code=401, detail="Invalid signature")
|
||
|
||
# 解析请求体
|
||
body = await request.json()
|
||
|
||
# 更新触发统计
|
||
manager.update_webhook_trigger(endpoint.id)
|
||
|
||
# 记录活动
|
||
manager.log_activity(
|
||
plugin_id=endpoint.plugin_id,
|
||
activity_type="webhook",
|
||
source=endpoint_type,
|
||
details={
|
||
"endpoint_id": endpoint.id,
|
||
"event": body.get("event"),
|
||
"data_keys": list(body.get("data", {}).keys())
|
||
}
|
||
)
|
||
|
||
# 处理数据(简化版本)
|
||
# 实际应该根据 endpoint.target_project_id 和 body 内容创建文档/实体等
|
||
|
||
return {
|
||
"success": True,
|
||
"endpoint_id": endpoint.id,
|
||
"received_at": datetime.now().isoformat()
|
||
}
|
||
|
||
|
||
# ==================== WebDAV API ====================
|
||
|
||
@app.post("/api/v1/webdav-syncs", response_model=WebDAVSyncResponse, tags=["WebDAV"])
|
||
async def create_webdav_sync(
|
||
plugin_id: str,
|
||
name: str,
|
||
server_url: str,
|
||
username: str,
|
||
password: str,
|
||
remote_path: str = "/",
|
||
local_path: str = "./sync",
|
||
sync_direction: str = "bidirectional",
|
||
sync_mode: str = "manual",
|
||
auto_analyze: bool = True,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""创建 WebDAV 同步配置"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
sync = manager.create_webdav_sync(
|
||
plugin_id=plugin_id,
|
||
name=name,
|
||
server_url=server_url,
|
||
username=username,
|
||
password=password,
|
||
remote_path=remote_path,
|
||
local_path=local_path,
|
||
sync_direction=sync_direction,
|
||
sync_mode=sync_mode,
|
||
auto_analyze=auto_analyze
|
||
)
|
||
|
||
return WebDAVSyncResponse(
|
||
id=sync.id,
|
||
plugin_id=sync.plugin_id,
|
||
name=sync.name,
|
||
server_url=sync.server_url,
|
||
username=sync.username,
|
||
remote_path=sync.remote_path,
|
||
local_path=sync.local_path,
|
||
sync_direction=sync.sync_direction,
|
||
sync_mode=sync.sync_mode,
|
||
auto_analyze=sync.auto_analyze,
|
||
is_active=sync.is_active,
|
||
last_sync_at=sync.last_sync_at,
|
||
created_at=sync.created_at
|
||
)
|
||
|
||
|
||
@app.get("/api/v1/webdav-syncs", response_model=List[WebDAVSyncResponse], tags=["WebDAV"])
|
||
async def list_webdav_syncs(
|
||
plugin_id: Optional[str] = None,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""列出 WebDAV 同步配置"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
syncs = manager.list_webdav_syncs(plugin_id=plugin_id)
|
||
|
||
return [
|
||
WebDAVSyncResponse(
|
||
id=s.id,
|
||
plugin_id=s.plugin_id,
|
||
name=s.name,
|
||
server_url=s.server_url,
|
||
username=s.username,
|
||
remote_path=s.remote_path,
|
||
local_path=s.local_path,
|
||
sync_direction=s.sync_direction,
|
||
sync_mode=s.sync_mode,
|
||
auto_analyze=s.auto_analyze,
|
||
is_active=s.is_active,
|
||
last_sync_at=s.last_sync_at,
|
||
created_at=s.created_at
|
||
)
|
||
for s in syncs
|
||
]
|
||
|
||
|
||
@app.post("/api/v1/webdav-syncs/{sync_id}/test", tags=["WebDAV"])
|
||
async def test_webdav_connection(
|
||
sync_id: str,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""测试 WebDAV 连接"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
sync = manager.get_webdav_sync(sync_id)
|
||
|
||
if not sync:
|
||
raise HTTPException(status_code=404, detail="WebDAV sync not found")
|
||
|
||
from plugin_manager import WebDAVSync as WebDAVSyncHandler
|
||
handler = WebDAVSyncHandler(manager)
|
||
|
||
success, message = await handler.test_connection(
|
||
sync.server_url,
|
||
sync.username,
|
||
sync.password
|
||
)
|
||
|
||
return {"success": success, "message": message}
|
||
|
||
|
||
@app.post("/api/v1/webdav-syncs/{sync_id}/sync", tags=["WebDAV"])
|
||
async def trigger_webdav_sync(
|
||
sync_id: str,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""手动触发 WebDAV 同步"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
sync = manager.get_webdav_sync(sync_id)
|
||
|
||
if not sync:
|
||
raise HTTPException(status_code=404, detail="WebDAV sync not found")
|
||
|
||
# 这里应该启动异步同步任务
|
||
# 简化版本,仅返回成功
|
||
|
||
manager.update_webdav_sync(
|
||
sync_id,
|
||
last_sync_at=datetime.now().isoformat(),
|
||
last_sync_status="running"
|
||
)
|
||
|
||
return {
|
||
"success": True,
|
||
"sync_id": sync_id,
|
||
"status": "running",
|
||
"message": "Sync started"
|
||
}
|
||
|
||
|
||
# ==================== Plugin Activity Logs ====================
|
||
|
||
@app.get("/api/v1/plugins/{plugin_id}/logs", tags=["Plugins"])
|
||
async def get_plugin_logs(
|
||
plugin_id: str,
|
||
activity_type: Optional[str] = None,
|
||
limit: int = 100,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""获取插件活动日志"""
|
||
if not PLUGIN_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Plugin manager not available")
|
||
|
||
manager = get_plugin_manager()
|
||
logs = manager.get_activity_logs(
|
||
plugin_id=plugin_id,
|
||
activity_type=activity_type,
|
||
limit=limit
|
||
)
|
||
|
||
return {
|
||
"logs": [
|
||
{
|
||
"id": log.id,
|
||
"activity_type": log.activity_type,
|
||
"source": log.source,
|
||
"details": log.details,
|
||
"created_at": log.created_at
|
||
}
|
||
for log in logs
|
||
]
|
||
}
|
||
|
||
|
||
# ==================== Phase 7 Task 3: Security & Compliance API ====================
|
||
|
||
# Pydantic models for security API
|
||
class AuditLogResponse(BaseModel):
    """Serialized audit-log entry returned by the audit-log endpoints."""
    id: str
    action_type: str
    user_id: Optional[str] = None
    user_ip: Optional[str] = None
    resource_type: Optional[str] = None
    resource_id: Optional[str] = None
    action_details: Optional[str] = None
    success: bool = True
    error_message: Optional[str] = None  # populated only for failed actions
    created_at: str
|
||
|
||
|
||
class AuditStatsResponse(BaseModel):
    """Aggregate audit statistics for a time window."""
    total_actions: int
    success_count: int
    failure_count: int
    # Per-action-type counters, e.g. {action_type: {"success": n, "failure": m}}
    # — exact inner keys come from SecurityManager.get_audit_stats; confirm there.
    action_breakdown: Dict[str, Dict[str, int]]
|
||
|
||
|
||
class EncryptionEnableRequest(BaseModel):
    """Master-password payload shared by the enable/disable/verify encryption endpoints."""
    master_password: str
|
||
|
||
|
||
class EncryptionConfigResponse(BaseModel):
    """Per-project encryption configuration (never exposes key material)."""
    id: str
    project_id: str
    is_enabled: bool
    encryption_type: str
    created_at: str
    updated_at: str
|
||
|
||
|
||
class MaskingRuleCreateRequest(BaseModel):
    """Payload to create a data-masking rule for a project."""
    name: str
    rule_type: str  # phone, email, id_card, bank_card, name, address, custom
    pattern: Optional[str] = None      # custom regex; built-in types supply their own
    replacement: Optional[str] = None  # replacement text; defaults handled by the manager
    description: Optional[str] = None
    priority: int = 0  # higher priority rules are applied first — confirm ordering in SecurityManager
|
||
|
||
|
||
class MaskingRuleResponse(BaseModel):
    """Serialized data-masking rule."""
    id: str
    project_id: str
    name: str
    rule_type: str
    pattern: str
    replacement: str
    is_active: bool
    priority: int
    description: Optional[str] = None
    created_at: str
    updated_at: str
|
||
|
||
|
||
class MaskingApplyRequest(BaseModel):
    """Payload for applying masking rules to a piece of text."""
    text: str
    rule_types: Optional[List[str]] = None  # None means apply all active rules
|
||
|
||
|
||
class MaskingApplyResponse(BaseModel):
    """Result of a masking run: the input, the masked output, and the rule names involved."""
    original_text: str
    masked_text: str
    applied_rules: List[str]
|
||
|
||
|
||
class AccessPolicyCreateRequest(BaseModel):
    """Payload to create a data access policy; every restriction is optional."""
    name: str
    description: Optional[str] = None
    allowed_users: Optional[List[str]] = None
    allowed_roles: Optional[List[str]] = None
    allowed_ips: Optional[List[str]] = None
    time_restrictions: Optional[Dict] = None  # schema defined by SecurityManager — confirm there
    max_access_count: Optional[int] = None
    require_approval: bool = False  # when True, access goes through the access-request flow
|
||
|
||
|
||
class AccessPolicyResponse(BaseModel):
    """Serialized data access policy (JSON-encoded DB columns are decoded to lists/dicts)."""
    id: str
    project_id: str
    name: str
    description: Optional[str] = None
    allowed_users: Optional[List[str]] = None
    allowed_roles: Optional[List[str]] = None
    allowed_ips: Optional[List[str]] = None
    time_restrictions: Optional[Dict] = None
    max_access_count: Optional[int] = None
    require_approval: bool = False
    is_active: bool = True
    created_at: str
    updated_at: str
|
||
|
||
|
||
class AccessRequestCreateRequest(BaseModel):
    """Payload to request access under an approval-gated policy."""
    policy_id: str
    request_reason: Optional[str] = None
    expires_hours: int = 24  # requested validity window once approved
|
||
|
||
|
||
class AccessRequestResponse(BaseModel):
    """Serialized access request, including approval state."""
    id: str
    policy_id: str
    user_id: str
    request_reason: Optional[str] = None
    status: str  # lifecycle state set by the security manager (pending/approved/rejected — confirm)
    approved_by: Optional[str] = None
    approved_at: Optional[str] = None
    expires_at: Optional[str] = None
    created_at: str
|
||
|
||
|
||
# ==================== Audit Logs API ====================
|
||
|
||
@app.get("/api/v1/audit-logs", response_model=List[AuditLogResponse], tags=["Security"])
|
||
async def get_audit_logs(
|
||
user_id: Optional[str] = None,
|
||
resource_type: Optional[str] = None,
|
||
resource_id: Optional[str] = None,
|
||
action_type: Optional[str] = None,
|
||
start_time: Optional[str] = None,
|
||
end_time: Optional[str] = None,
|
||
success: Optional[bool] = None,
|
||
limit: int = 100,
|
||
offset: int = 0,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""查询审计日志"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
logs = manager.get_audit_logs(
|
||
user_id=user_id,
|
||
resource_type=resource_type,
|
||
resource_id=resource_id,
|
||
action_type=action_type,
|
||
start_time=start_time,
|
||
end_time=end_time,
|
||
success=success,
|
||
limit=limit,
|
||
offset=offset
|
||
)
|
||
|
||
return [
|
||
AuditLogResponse(
|
||
id=log.id,
|
||
action_type=log.action_type,
|
||
user_id=log.user_id,
|
||
user_ip=log.user_ip,
|
||
resource_type=log.resource_type,
|
||
resource_id=log.resource_id,
|
||
action_details=log.action_details,
|
||
success=log.success,
|
||
error_message=log.error_message,
|
||
created_at=log.created_at
|
||
)
|
||
for log in logs
|
||
]
|
||
|
||
|
||
@app.get("/api/v1/audit-logs/stats", response_model=AuditStatsResponse, tags=["Security"])
|
||
async def get_audit_stats(
|
||
start_time: Optional[str] = None,
|
||
end_time: Optional[str] = None,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""获取审计统计"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
stats = manager.get_audit_stats(start_time=start_time, end_time=end_time)
|
||
|
||
return AuditStatsResponse(**stats)
|
||
|
||
|
||
# ==================== Encryption API ====================
|
||
|
||
@app.post("/api/v1/projects/{project_id}/encryption/enable", response_model=EncryptionConfigResponse, tags=["Security"])
|
||
async def enable_project_encryption(
|
||
project_id: str,
|
||
request: EncryptionEnableRequest,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""启用项目端到端加密"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
|
||
try:
|
||
config = manager.enable_encryption(project_id, request.master_password)
|
||
return EncryptionConfigResponse(
|
||
id=config.id,
|
||
project_id=config.project_id,
|
||
is_enabled=config.is_enabled,
|
||
encryption_type=config.encryption_type,
|
||
created_at=config.created_at,
|
||
updated_at=config.updated_at
|
||
)
|
||
except RuntimeError as e:
|
||
raise HTTPException(status_code=400, detail=str(e))
|
||
|
||
|
||
@app.post("/api/v1/projects/{project_id}/encryption/disable", tags=["Security"])
|
||
async def disable_project_encryption(
|
||
project_id: str,
|
||
request: EncryptionEnableRequest,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""禁用项目加密"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
success = manager.disable_encryption(project_id, request.master_password)
|
||
|
||
if not success:
|
||
raise HTTPException(status_code=400, detail="Invalid password or encryption not enabled")
|
||
|
||
return {"success": True, "message": "Encryption disabled successfully"}
|
||
|
||
|
||
@app.post("/api/v1/projects/{project_id}/encryption/verify", tags=["Security"])
|
||
async def verify_encryption_password(
|
||
project_id: str,
|
||
request: EncryptionEnableRequest,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""验证加密密码"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
is_valid = manager.verify_encryption_password(project_id, request.master_password)
|
||
|
||
return {"valid": is_valid}
|
||
|
||
|
||
@app.get("/api/v1/projects/{project_id}/encryption", response_model=Optional[EncryptionConfigResponse], tags=["Security"])
|
||
async def get_encryption_config(
|
||
project_id: str,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""获取项目加密配置"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
config = manager.get_encryption_config(project_id)
|
||
|
||
if not config:
|
||
return None
|
||
|
||
return EncryptionConfigResponse(
|
||
id=config.id,
|
||
project_id=config.project_id,
|
||
is_enabled=config.is_enabled,
|
||
encryption_type=config.encryption_type,
|
||
created_at=config.created_at,
|
||
updated_at=config.updated_at
|
||
)
|
||
|
||
|
||
# ==================== Data Masking API ====================
|
||
|
||
@app.post("/api/v1/projects/{project_id}/masking-rules", response_model=MaskingRuleResponse, tags=["Security"])
|
||
async def create_masking_rule(
|
||
project_id: str,
|
||
request: MaskingRuleCreateRequest,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""创建数据脱敏规则"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
|
||
try:
|
||
rule_type = MaskingRuleType(request.rule_type)
|
||
except ValueError:
|
||
raise HTTPException(status_code=400, detail=f"Invalid rule type: {request.rule_type}")
|
||
|
||
rule = manager.create_masking_rule(
|
||
project_id=project_id,
|
||
name=request.name,
|
||
rule_type=rule_type,
|
||
pattern=request.pattern,
|
||
replacement=request.replacement,
|
||
description=request.description,
|
||
priority=request.priority
|
||
)
|
||
|
||
return MaskingRuleResponse(
|
||
id=rule.id,
|
||
project_id=rule.project_id,
|
||
name=rule.name,
|
||
rule_type=rule.rule_type,
|
||
pattern=rule.pattern,
|
||
replacement=rule.replacement,
|
||
is_active=rule.is_active,
|
||
priority=rule.priority,
|
||
description=rule.description,
|
||
created_at=rule.created_at,
|
||
updated_at=rule.updated_at
|
||
)
|
||
|
||
|
||
@app.get("/api/v1/projects/{project_id}/masking-rules", response_model=List[MaskingRuleResponse], tags=["Security"])
|
||
async def get_masking_rules(
|
||
project_id: str,
|
||
active_only: bool = True,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""获取项目脱敏规则"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
rules = manager.get_masking_rules(project_id, active_only=active_only)
|
||
|
||
return [
|
||
MaskingRuleResponse(
|
||
id=rule.id,
|
||
project_id=rule.project_id,
|
||
name=rule.name,
|
||
rule_type=rule.rule_type,
|
||
pattern=rule.pattern,
|
||
replacement=rule.replacement,
|
||
is_active=rule.is_active,
|
||
priority=rule.priority,
|
||
description=rule.description,
|
||
created_at=rule.created_at,
|
||
updated_at=rule.updated_at
|
||
)
|
||
for rule in rules
|
||
]
|
||
|
||
|
||
@app.put("/api/v1/masking-rules/{rule_id}", response_model=MaskingRuleResponse, tags=["Security"])
|
||
async def update_masking_rule(
|
||
rule_id: str,
|
||
name: Optional[str] = None,
|
||
pattern: Optional[str] = None,
|
||
replacement: Optional[str] = None,
|
||
is_active: Optional[bool] = None,
|
||
priority: Optional[int] = None,
|
||
description: Optional[str] = None,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""更新脱敏规则"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
|
||
kwargs = {}
|
||
if name is not None:
|
||
kwargs["name"] = name
|
||
if pattern is not None:
|
||
kwargs["pattern"] = pattern
|
||
if replacement is not None:
|
||
kwargs["replacement"] = replacement
|
||
if is_active is not None:
|
||
kwargs["is_active"] = is_active
|
||
if priority is not None:
|
||
kwargs["priority"] = priority
|
||
if description is not None:
|
||
kwargs["description"] = description
|
||
|
||
rule = manager.update_masking_rule(rule_id, **kwargs)
|
||
|
||
if not rule:
|
||
raise HTTPException(status_code=404, detail="Masking rule not found")
|
||
|
||
return MaskingRuleResponse(
|
||
id=rule.id,
|
||
project_id=rule.project_id,
|
||
name=rule.name,
|
||
rule_type=rule.rule_type,
|
||
pattern=rule.pattern,
|
||
replacement=rule.replacement,
|
||
is_active=rule.is_active,
|
||
priority=rule.priority,
|
||
description=rule.description,
|
||
created_at=rule.created_at,
|
||
updated_at=rule.updated_at
|
||
)
|
||
|
||
|
||
@app.delete("/api/v1/masking-rules/{rule_id}", tags=["Security"])
|
||
async def delete_masking_rule(
|
||
rule_id: str,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""删除脱敏规则"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
success = manager.delete_masking_rule(rule_id)
|
||
|
||
if not success:
|
||
raise HTTPException(status_code=404, detail="Masking rule not found")
|
||
|
||
return {"success": True, "message": "Masking rule deleted"}
|
||
|
||
|
||
@app.post("/api/v1/projects/{project_id}/masking/apply", response_model=MaskingApplyResponse, tags=["Security"])
|
||
async def apply_masking(
|
||
project_id: str,
|
||
request: MaskingApplyRequest,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""应用脱敏规则到文本"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
|
||
# 转换规则类型
|
||
rule_types = None
|
||
if request.rule_types:
|
||
rule_types = [MaskingRuleType(rt) for rt in request.rule_types]
|
||
|
||
masked_text = manager.apply_masking(request.text, project_id, rule_types)
|
||
|
||
# 获取应用的规则
|
||
rules = manager.get_masking_rules(project_id)
|
||
applied_rules = [r.name for r in rules if r.is_active]
|
||
|
||
return MaskingApplyResponse(
|
||
original_text=request.text,
|
||
masked_text=masked_text,
|
||
applied_rules=applied_rules
|
||
)
|
||
|
||
|
||
# ==================== Data Access Policy API ====================
|
||
|
||
@app.post("/api/v1/projects/{project_id}/access-policies", response_model=AccessPolicyResponse, tags=["Security"])
|
||
async def create_access_policy(
|
||
project_id: str,
|
||
request: AccessPolicyCreateRequest,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""创建数据访问策略"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
|
||
policy = manager.create_access_policy(
|
||
project_id=project_id,
|
||
name=request.name,
|
||
description=request.description,
|
||
allowed_users=request.allowed_users,
|
||
allowed_roles=request.allowed_roles,
|
||
allowed_ips=request.allowed_ips,
|
||
time_restrictions=request.time_restrictions,
|
||
max_access_count=request.max_access_count,
|
||
require_approval=request.require_approval
|
||
)
|
||
|
||
return AccessPolicyResponse(
|
||
id=policy.id,
|
||
project_id=policy.project_id,
|
||
name=policy.name,
|
||
description=policy.description,
|
||
allowed_users=json.loads(policy.allowed_users) if policy.allowed_users else None,
|
||
allowed_roles=json.loads(policy.allowed_roles) if policy.allowed_roles else None,
|
||
allowed_ips=json.loads(policy.allowed_ips) if policy.allowed_ips else None,
|
||
time_restrictions=json.loads(policy.time_restrictions) if policy.time_restrictions else None,
|
||
max_access_count=policy.max_access_count,
|
||
require_approval=policy.require_approval,
|
||
is_active=policy.is_active,
|
||
created_at=policy.created_at,
|
||
updated_at=policy.updated_at
|
||
)
|
||
|
||
|
||
@app.get("/api/v1/projects/{project_id}/access-policies", response_model=List[AccessPolicyResponse], tags=["Security"])
|
||
async def get_access_policies(
|
||
project_id: str,
|
||
active_only: bool = True,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""获取项目访问策略"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
policies = manager.get_access_policies(project_id, active_only=active_only)
|
||
|
||
return [
|
||
AccessPolicyResponse(
|
||
id=policy.id,
|
||
project_id=policy.project_id,
|
||
name=policy.name,
|
||
description=policy.description,
|
||
allowed_users=json.loads(policy.allowed_users) if policy.allowed_users else None,
|
||
allowed_roles=json.loads(policy.allowed_roles) if policy.allowed_roles else None,
|
||
allowed_ips=json.loads(policy.allowed_ips) if policy.allowed_ips else None,
|
||
time_restrictions=json.loads(policy.time_restrictions) if policy.time_restrictions else None,
|
||
max_access_count=policy.max_access_count,
|
||
require_approval=policy.require_approval,
|
||
is_active=policy.is_active,
|
||
created_at=policy.created_at,
|
||
updated_at=policy.updated_at
|
||
)
|
||
for policy in policies
|
||
]
|
||
|
||
|
||
@app.post("/api/v1/access-policies/{policy_id}/check", tags=["Security"])
|
||
async def check_access_permission(
|
||
policy_id: str,
|
||
user_id: str,
|
||
user_ip: Optional[str] = None,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""检查访问权限"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
allowed, reason = manager.check_access_permission(policy_id, user_id, user_ip)
|
||
|
||
return {
|
||
"allowed": allowed,
|
||
"reason": reason if not allowed else None
|
||
}
|
||
|
||
|
||
# ==================== Access Request API ====================
|
||
|
||
@app.post("/api/v1/access-requests", response_model=AccessRequestResponse, tags=["Security"])
|
||
async def create_access_request(
|
||
request: AccessRequestCreateRequest,
|
||
user_id: str, # 实际应该从认证信息中获取
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""创建访问请求"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
|
||
access_request = manager.create_access_request(
|
||
policy_id=request.policy_id,
|
||
user_id=user_id,
|
||
request_reason=request.request_reason,
|
||
expires_hours=request.expires_hours
|
||
)
|
||
|
||
return AccessRequestResponse(
|
||
id=access_request.id,
|
||
policy_id=access_request.policy_id,
|
||
user_id=access_request.user_id,
|
||
request_reason=access_request.request_reason,
|
||
status=access_request.status,
|
||
approved_by=access_request.approved_by,
|
||
approved_at=access_request.approved_at,
|
||
expires_at=access_request.expires_at,
|
||
created_at=access_request.created_at
|
||
)
|
||
|
||
|
||
@app.post("/api/v1/access-requests/{request_id}/approve", response_model=AccessRequestResponse, tags=["Security"])
|
||
async def approve_access_request(
|
||
request_id: str,
|
||
approved_by: str,
|
||
expires_hours: int = 24,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""批准访问请求"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
access_request = manager.approve_access_request(request_id, approved_by, expires_hours)
|
||
|
||
if not access_request:
|
||
raise HTTPException(status_code=404, detail="Access request not found")
|
||
|
||
return AccessRequestResponse(
|
||
id=access_request.id,
|
||
policy_id=access_request.policy_id,
|
||
user_id=access_request.user_id,
|
||
request_reason=access_request.request_reason,
|
||
status=access_request.status,
|
||
approved_by=access_request.approved_by,
|
||
approved_at=access_request.approved_at,
|
||
expires_at=access_request.expires_at,
|
||
created_at=access_request.created_at
|
||
)
|
||
|
||
|
||
@app.post("/api/v1/access-requests/{request_id}/reject", response_model=AccessRequestResponse, tags=["Security"])
|
||
async def reject_access_request(
|
||
request_id: str,
|
||
rejected_by: str,
|
||
api_key: str = Depends(verify_api_key)
|
||
):
|
||
"""拒绝访问请求"""
|
||
if not SECURITY_MANAGER_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Security manager not available")
|
||
|
||
manager = get_security_manager()
|
||
access_request = manager.reject_access_request(request_id, rejected_by)
|
||
|
||
if not access_request:
|
||
raise HTTPException(status_code=404, detail="Access request not found")
|
||
|
||
return AccessRequestResponse(
|
||
id=access_request.id,
|
||
policy_id=access_request.policy_id,
|
||
user_id=access_request.user_id,
|
||
request_reason=access_request.request_reason,
|
||
status=access_request.status,
|
||
approved_by=access_request.approved_by,
|
||
approved_at=access_request.approved_at,
|
||
expires_at=access_request.expires_at,
|
||
created_at=access_request.created_at
|
||
)
|
||
|
||
|
||
# ==========================================
|
||
# Phase 7 Task 4: 协作与共享 API
|
||
# ==========================================
|
||
|
||
# ----- 请求模型 -----
|
||
|
||
class ShareLinkCreate(BaseModel):
    """Payload to create a project share link."""
    permission: str = "read_only"  # read_only, comment, edit, admin
    expires_in_days: Optional[int] = None  # None means the link never expires
    max_uses: Optional[int] = None         # None means unlimited uses
    password: Optional[str] = None         # optional password gate for the link
    allow_download: bool = False
    allow_export: bool = False
|
||
|
||
class ShareLinkVerify(BaseModel):
    """Payload to validate a share token (password required only for protected links)."""
    token: str
    password: Optional[str] = None
|
||
|
||
class CommentCreate(BaseModel):
    """Payload to post a comment on a project artifact."""
    target_type: str  # entity, relation, transcript, project
    target_id: str
    parent_id: Optional[str] = None  # set to reply within an existing thread
    content: str
    mentions: Optional[List[str]] = None  # user ids mentioned in the comment
|
||
|
||
class CommentUpdate(BaseModel):
    """Payload to edit an existing comment's text."""
    content: str
|
||
|
||
class CommentResolve(BaseModel):
    """Payload to mark a comment thread resolved or unresolved."""
    resolved: bool
|
||
|
||
class TeamMemberInvite(BaseModel):
    """Payload to invite a user onto a project team."""
    user_id: str
    user_name: str
    user_email: str
    role: str = "viewer"  # owner, admin, editor, viewer, commenter
|
||
|
||
class TeamMemberRoleUpdate(BaseModel):
    """Payload to change an existing team member's role."""
    role: str
|
||
|
||
|
||
# ----- 项目分享 -----
|
||
|
||
@app.post("/api/v1/projects/{project_id}/shares")
|
||
async def create_share_link(project_id: str, request: ShareLinkCreate, created_by: str = "current_user"):
|
||
"""创建项目分享链接"""
|
||
if not COLLABORATION_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Collaboration module not available")
|
||
|
||
manager = get_collab_manager()
|
||
share = manager.create_share_link(
|
||
project_id=project_id,
|
||
created_by=created_by,
|
||
permission=request.permission,
|
||
expires_in_days=request.expires_in_days,
|
||
max_uses=request.max_uses,
|
||
password=request.password,
|
||
allow_download=request.allow_download,
|
||
allow_export=request.allow_export
|
||
)
|
||
|
||
return {
|
||
"id": share.id,
|
||
"token": share.token,
|
||
"permission": share.permission,
|
||
"created_at": share.created_at,
|
||
"expires_at": share.expires_at,
|
||
"max_uses": share.max_uses,
|
||
"share_url": f"/share/{share.token}"
|
||
}
|
||
|
||
@app.get("/api/v1/projects/{project_id}/shares")
|
||
async def list_project_shares(project_id: str):
|
||
"""列出项目的所有分享链接"""
|
||
if not COLLABORATION_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Collaboration module not available")
|
||
|
||
manager = get_collab_manager()
|
||
shares = manager.list_project_shares(project_id)
|
||
|
||
return {
|
||
"shares": [
|
||
{
|
||
"id": s.id,
|
||
"token": s.token,
|
||
"permission": s.permission,
|
||
"created_at": s.created_at,
|
||
"expires_at": s.expires_at,
|
||
"use_count": s.use_count,
|
||
"max_uses": s.max_uses,
|
||
"is_active": s.is_active,
|
||
"has_password": s.password_hash is not None,
|
||
"allow_download": s.allow_download,
|
||
"allow_export": s.allow_export
|
||
}
|
||
for s in shares
|
||
]
|
||
}
|
||
|
||
@app.post("/api/v1/shares/verify")
|
||
async def verify_share_link(request: ShareLinkVerify):
|
||
"""验证分享链接"""
|
||
if not COLLABORATION_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Collaboration module not available")
|
||
|
||
manager = get_collab_manager()
|
||
share = manager.validate_share_token(request.token, request.password)
|
||
|
||
if not share:
|
||
raise HTTPException(status_code=401, detail="Invalid or expired share link")
|
||
|
||
# 增加使用次数
|
||
manager.increment_share_usage(request.token)
|
||
|
||
return {
|
||
"valid": True,
|
||
"project_id": share.project_id,
|
||
"permission": share.permission,
|
||
"allow_download": share.allow_download,
|
||
"allow_export": share.allow_export
|
||
}
|
||
|
||
@app.get("/api/v1/shares/{token}/access")
|
||
async def access_shared_project(token: str, password: Optional[str] = None):
|
||
"""通过分享链接访问项目"""
|
||
if not COLLABORATION_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Collaboration module not available")
|
||
|
||
manager = get_collab_manager()
|
||
share = manager.validate_share_token(token, password)
|
||
|
||
if not share:
|
||
raise HTTPException(status_code=401, detail="Invalid or expired share link")
|
||
|
||
# 增加使用次数
|
||
manager.increment_share_usage(token)
|
||
|
||
# 获取项目信息
|
||
if not DB_AVAILABLE:
|
||
raise HTTPException(status_code=503, detail="Database not available")
|
||
|
||
db = get_db_manager()
|
||
project = db.get_project(share.project_id)
|
||
|
||
if not project:
|
||
raise HTTPException(status_code=404, detail="Project not found")
|
||
|
||
return {
|
||
"project": {
|
||
"id": project.id,
|
||
"name": project.name,
|
||
"description": project.description,
|
||
"created_at": project.created_at
|
||
},
|
||
"permission": share.permission,
|
||
"allow_download": share.allow_download,
|
||
"allow_export": share.allow_export
|
||
}
|
||
|
||
@app.delete("/api/v1/shares/{share_id}")
async def revoke_share_link(share_id: str, revoked_by: str = "current_user"):
    """Revoke an existing share link so it can no longer be used."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    revoked = get_collab_manager().revoke_share_link(share_id, revoked_by)
    if not revoked:
        raise HTTPException(status_code=404, detail="Share link not found")

    return {"success": True, "message": "Share link revoked"}

# ----- Comments & Annotations -----

@app.post("/api/v1/projects/{project_id}/comments")
async def add_comment(project_id: str, request: CommentCreate, author: str = "current_user", author_name: str = "User"):
    """Create a comment (or threaded reply) on a target inside a project."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    collab = get_collab_manager()
    created = collab.add_comment(
        project_id=project_id,
        target_type=request.target_type,
        target_id=request.target_id,
        author=author,
        author_name=author_name,
        content=request.content,
        parent_id=request.parent_id,
        mentions=request.mentions
    )

    # Echo the persisted comment back to the caller.
    fields = ("id", "target_type", "target_id", "parent_id", "author",
              "author_name", "content", "created_at", "resolved")
    return {name: getattr(created, name) for name in fields}
@app.get("/api/v1/{target_type}/{target_id}/comments")
async def get_comments(target_type: str, target_id: str, include_resolved: bool = True):
    """List the comments attached to a single target.

    Set ``include_resolved=False`` to hide comments already marked resolved.
    """
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    found = get_collab_manager().get_comments(target_type, target_id, include_resolved)

    def serialize(c):
        # Per-comment payload, including resolution metadata.
        return {
            "id": c.id,
            "parent_id": c.parent_id,
            "author": c.author,
            "author_name": c.author_name,
            "content": c.content,
            "created_at": c.created_at,
            "updated_at": c.updated_at,
            "resolved": c.resolved,
            "resolved_by": c.resolved_by,
            "resolved_at": c.resolved_at
        }

    return {"count": len(found), "comments": [serialize(c) for c in found]}
@app.get("/api/v1/projects/{project_id}/comments")
async def get_project_comments(project_id: str, limit: int = 50, offset: int = 0):
    """Page through every comment in a project (newest paging left to the manager)."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    page = get_collab_manager().get_project_comments(project_id, limit, offset)

    # Flat per-comment projection; key order matches the public payload shape.
    keys = ("id", "target_type", "target_id", "parent_id", "author",
            "author_name", "content", "created_at", "resolved")
    return {
        "count": len(page),
        "comments": [{k: getattr(c, k) for k in keys} for c in page]
    }
@app.put("/api/v1/comments/{comment_id}")
async def update_comment(comment_id: str, request: CommentUpdate, updated_by: str = "current_user"):
    """Edit a comment's content; 404 when missing or the caller is not allowed."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    edited = get_collab_manager().update_comment(comment_id, request.content, updated_by)
    if not edited:
        raise HTTPException(status_code=404, detail="Comment not found or not authorized")

    return {
        "id": edited.id,
        "content": edited.content,
        "updated_at": edited.updated_at
    }
@app.post("/api/v1/comments/{comment_id}/resolve")
async def resolve_comment(comment_id: str, resolved_by: str = "current_user"):
    """Mark a comment as resolved."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    if not get_collab_manager().resolve_comment(comment_id, resolved_by):
        raise HTTPException(status_code=404, detail="Comment not found")

    return {"success": True, "message": "Comment resolved"}
@app.delete("/api/v1/comments/{comment_id}")
async def delete_comment(comment_id: str, deleted_by: str = "current_user"):
    """Delete a comment; 404 when missing or the caller is not allowed."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    if not get_collab_manager().delete_comment(comment_id, deleted_by):
        raise HTTPException(status_code=404, detail="Comment not found or not authorized")

    return {"success": True, "message": "Comment deleted"}

# ----- Change History -----

@app.get("/api/v1/projects/{project_id}/history")
async def get_change_history(
    project_id: str,
    entity_type: Optional[str] = None,
    entity_id: Optional[str] = None,
    limit: int = 50,
    offset: int = 0
):
    """Page through a project's change history.

    ``entity_type`` / ``entity_id`` optionally narrow the results to one entity.
    """
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    rows = get_collab_manager().get_change_history(project_id, entity_type, entity_id, limit, offset)

    # Project each change record onto the public payload shape.
    keys = ("id", "change_type", "entity_type", "entity_id", "entity_name",
            "changed_by", "changed_by_name", "changed_at", "old_value",
            "new_value", "description", "reverted")
    return {
        "count": len(rows),
        "history": [{k: getattr(r, k) for k in keys} for r in rows]
    }
@app.get("/api/v1/projects/{project_id}/history/stats")
async def get_change_history_stats(project_id: str):
    """Return aggregate change statistics for a project (shape defined by the manager)."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    return get_collab_manager().get_change_stats(project_id)
@app.get("/api/v1/{entity_type}/{entity_id}/versions")
async def get_entity_versions(entity_type: str, entity_id: str):
    """Return the full version (change) history of a single entity."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    history = get_collab_manager().get_entity_version_history(entity_type, entity_id)

    keys = ("id", "change_type", "changed_by", "changed_by_name",
            "changed_at", "old_value", "new_value", "description")
    return {
        "count": len(history),
        "versions": [{k: getattr(rec, k) for k in keys} for rec in history]
    }
@app.post("/api/v1/history/{record_id}/revert")
async def revert_change(record_id: str, reverted_by: str = "current_user"):
    """Revert a recorded change; 404 when the record is missing or already reverted."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    if not get_collab_manager().revert_change(record_id, reverted_by):
        raise HTTPException(status_code=404, detail="Change record not found or already reverted")

    return {"success": True, "message": "Change reverted"}

# ----- Team Members -----

@app.post("/api/v1/projects/{project_id}/members")
async def invite_team_member(project_id: str, request: TeamMemberInvite, invited_by: str = "current_user"):
    """Add a user to a project's team with the requested role."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    member = get_collab_manager().add_team_member(
        project_id=project_id,
        user_id=request.user_id,
        user_name=request.user_name,
        user_email=request.user_email,
        role=request.role,
        invited_by=invited_by
    )

    # Return the stored membership record.
    keys = ("id", "user_id", "user_name", "user_email", "role",
            "joined_at", "permissions")
    return {k: getattr(member, k) for k in keys}
@app.get("/api/v1/projects/{project_id}/members")
async def list_team_members(project_id: str):
    """List every member of a project's team."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    roster = get_collab_manager().get_team_members(project_id)

    keys = ("id", "user_id", "user_name", "user_email", "role",
            "joined_at", "last_active_at", "permissions")
    return {
        "count": len(roster),
        "members": [{k: getattr(m, k) for k in keys} for m in roster]
    }
@app.put("/api/v1/members/{member_id}/role")
async def update_member_role(member_id: str, request: TeamMemberRoleUpdate, updated_by: str = "current_user"):
    """Change a team member's role."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    if not get_collab_manager().update_member_role(member_id, request.role, updated_by):
        raise HTTPException(status_code=404, detail="Member not found")

    return {"success": True, "message": "Member role updated"}
@app.delete("/api/v1/members/{member_id}")
async def remove_team_member(member_id: str, removed_by: str = "current_user"):
    """Remove a member from a project's team."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    if not get_collab_manager().remove_team_member(member_id, removed_by):
        raise HTTPException(status_code=404, detail="Member not found")

    return {"success": True, "message": "Member removed"}
@app.get("/api/v1/projects/{project_id}/permissions")
async def check_project_permissions(project_id: str, user_id: str = "current_user"):
    """Report whether a user is on a project's team and what they may do."""
    if not COLLABORATION_AVAILABLE:
        raise HTTPException(status_code=503, detail="Collaboration module not available")

    roster = get_collab_manager().get_team_members(project_id)

    # Find this user's membership record, if any.
    membership = next((m for m in roster if m.user_id == user_id), None)

    if membership is None:
        # Not a member: no access, no role, no permissions.
        return {
            "has_access": False,
            "role": None,
            "permissions": []
        }

    return {
        "has_access": True,
        "role": membership.role,
        "permissions": membership.permissions
    }
# Serve frontend - MUST be last to not override API routes
# (a StaticFiles mount at "/" matches every path, so any route registered
# after it would be shadowed).
# NOTE(review): directory="frontend" is resolved relative to the process
# working directory, not this file — confirm the launch CWD.
app.mount("/", StaticFiles(directory="frontend", html=True), name="frontend")


if __name__ == "__main__":
    # Dev entry point: serve the app with uvicorn on all interfaces, port 8000.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)