完整实现 Swarm 多智能体协作系统

- 新增 CLIPluginAdapter 统一接口 (backend/app/core/agent_adapter.py)
- 新增 LLM 服务层,支持 Anthropic/OpenAI/DeepSeek/Ollama (backend/app/services/llm_service.py)
- 新增 Agent 执行引擎,支持文件锁自动管理 (backend/app/services/agent_executor.py)
- 新增 NativeLLMAgent 原生 LLM 适配器 (backend/app/adapters/native_llm_agent.py)
- 新增进程管理器 (backend/app/services/process_manager.py)
- 新增 Agent 控制 API (backend/app/routers/agents_control.py)
- 新增 WebSocket 实时通信 (backend/app/routers/websocket.py)
- 更新前端 AgentsPage,支持启动/停止 Agent
- 测试通过:Agent 启动、批量操作、栅栏同步

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Claude Code
2026-03-09 17:32:11 +08:00
commit dc398d7c7b
118 changed files with 23120 additions and 0 deletions

View File

@@ -0,0 +1,4 @@
"""Services Package"""
from .storage import StorageService, get_storage
__all__ = ["StorageService", "get_storage"]

View File

@@ -0,0 +1,486 @@
"""
Agent 执行引擎
负责协调 LLM 调用和资源管理,提供声明式的任务执行接口。
自动管理文件锁、心跳更新等生命周期。
"""
import asyncio
import logging
import re
import time
from typing import Dict, List, Optional, Any, Callable
from dataclasses import dataclass, field
from pathlib import Path
from .llm_service import ModelRouter, LLMMessage, TaskType
from .storage import get_storage
from .file_lock import get_file_lock_service
from .heartbeat import get_heartbeat_service
from .agent_registry import get_agent_registry, AgentInfo
from ..core.agent_adapter import Task, Result
logger = logging.getLogger(__name__)
@dataclass
class ExecutionPlan:
    """A structured plan for executing a task, typically produced by the LLM."""
    steps: List[str] = field(default_factory=list)  # ordered execution steps
    required_files: List[str] = field(default_factory=list)  # files the task touches
    estimated_duration: str = ""  # human-readable time estimate
    complexity: str = "medium"  # simple | medium | complex
    requires_code_execution: bool = False  # whether code must actually be run
    subtasks: List[str] = field(default_factory=list)  # optional decomposition
@dataclass
class ExecutionContext:
    """
    Mutable per-task state threaded through AgentExecutor.

    Declares every attribute execute_task() reads or writes, including
    `required_files`, which was previously attached dynamically (it was
    assigned in execute_task and read by _process_result / _mock_llm_response
    without ever being a declared field).
    """
    agent_id: str
    agent_role: str
    agent_model: str
    task_id: str
    acquired_locks: List[str] = field(default_factory=list)  # locks actually held
    start_time: float = 0  # epoch seconds when execution began
    messages: List[LLMMessage] = field(default_factory=list)  # conversation so far
    metadata: Dict[str, Any] = field(default_factory=dict)  # caller-supplied extras
    # Appended last so existing positional construction keeps working.
    required_files: List[str] = field(default_factory=list)  # files referenced by the task
class AgentExecutor:
    """
    Agent task execution engine.

    Responsibilities:
      1. Task analysis   - parse the task description, identify referenced files.
      2. Plan generation - ask the LLM for an execution plan.
      3. Resource mgmt   - automatically acquire and release file locks.
      4. Task execution  - run the task through the LLM.
      5. Result handling - format output and update agent status.
    """

    def __init__(
        self,
        llm_service: ModelRouter = None,
        storage=None,
        lock_service=None,
        heartbeat_service=None,
        registry=None
    ):
        # Collaborators default to the module-level singletons so the executor
        # can be constructed with no arguments in production code.
        self.llm = llm_service
        self.storage = storage or get_storage()
        self.locks = lock_service or get_file_lock_service()
        self.heartbeat = heartbeat_service or get_heartbeat_service()
        self.registry = registry or get_agent_registry()
        # Working directory used to resolve relative file paths.
        self.work_dir = Path.cwd()

    async def execute_task(
        self,
        agent: AgentInfo,
        task: Task,
        context: Dict[str, Any] = None
    ) -> Result:
        """
        Main entry point for executing a task.

        Automatically manages:
          1. file-lock acquisition and release,
          2. heartbeat updates,
          3. task progress tracking,
          4. error handling and recovery.

        Returns a Result; on failure `success` is False and `error` carries
        the exception message.
        """
        execution_context = ExecutionContext(
            agent_id=agent.agent_id,
            agent_role=agent.role,
            agent_model=agent.model,
            task_id=task.task_id,
            start_time=time.time(),
            metadata=context or {}
        )
        try:
            # 1. Heartbeat: mark the agent as working (truncate long descriptions).
            await self.heartbeat.update_heartbeat(
                agent.agent_id,
                "working",
                task.description[:100],
                0
            )
            # 2. Identify the files the task refers to.
            execution_context.required_files = await self._analyze_required_files(
                task.description
            )
            # 3. Acquire file locks.
            await self._acquire_locks(execution_context)
            # 4. Build the LLM conversation.
            execution_context.messages = await self._build_messages(
                agent, task, execution_context
            )
            # 5. Run the task through the LLM.
            llm_response = await self._call_llm(execution_context)
            # 6. Format the result.
            result = await self._process_result(llm_response, execution_context)
            # 7. Heartbeat: back to idle, 100% complete.
            await self.heartbeat.update_heartbeat(agent.agent_id, "idle", "", 100)
            return result
        except Exception as e:
            logger.error(f"任务执行失败: {e}", exc_info=True)
            # Record the failure in the heartbeat so monitors can see it.
            await self.heartbeat.update_heartbeat(agent.agent_id, "error", str(e), 0)
            return Result(
                success=False,
                output="",
                error=str(e),
                execution_time=time.time() - execution_context.start_time
            )
        finally:
            # 8. Always release whatever locks were actually acquired.
            await self._release_locks(execution_context)

    async def _analyze_required_files(self, task_description: str) -> List[str]:
        """
        Heuristically extract file paths mentioned in a task description.

        Pure regex, no LLM round-trip: matches paths with common source/config
        extensions plus bare paths under conventional project directories.
        """
        file_patterns = [
            # Paths ending in a known source/config extension.
            # (A former second pattern matching a subset of these extensions
            # was removed as redundant — every one of its matches was already
            # produced by this pattern.)
            r'[a-zA-Z_/\\][a-zA-Z0-9_/\\]*\.(?:py|js|ts|tsx|jsx|java|go|rs|c|h|cpp|hpp|css|html|md|json|yaml|yml)',
            # Bare paths under conventional project directories.
            r'src/[a-zA-Z0-9_/\\]*',
            r'app/[a-zA-Z0-9_/\\]*',
            r'components/[a-zA-Z0-9_/\\]*',
            r'pages/[a-zA-Z0-9_/\\]*',
            r'services/[a-zA-Z0-9_/\\]*',
            r'utils/[a-zA-Z0-9_/\\]*',
        ]
        files = set()
        for pattern in file_patterns:
            files.update(re.findall(pattern, task_description))
        # Normalise: forward slashes only, collapse repeated separators.
        normalized_files = []
        for f in files:
            f = re.sub(r'/+', '/', f.replace('\\', '/'))
            if f not in normalized_files:
                normalized_files.append(f)
        logger.debug(f"识别到文件: {normalized_files}")
        return normalized_files

    async def _acquire_locks(self, context: ExecutionContext) -> None:
        """Try to lock every required file; failures are logged, not fatal."""
        for file_path in context.required_files:
            success = await self.locks.acquire_lock(
                file_path,
                context.agent_id,
                context.agent_role.upper()
            )
            if success:
                # Only record locks we actually hold, so release is exact.
                context.acquired_locks.append(file_path)
                logger.debug(f"获取锁成功: {file_path}")
            else:
                logger.warning(f"获取锁失败: {file_path} (可能被其他 Agent 占用)")

    async def _release_locks(self, context: ExecutionContext) -> None:
        """Release every lock acquired for this execution (best effort)."""
        for file_path in context.acquired_locks:
            try:
                await self.locks.release_lock(file_path, context.agent_id)
                logger.debug(f"释放锁: {file_path}")
            except Exception as e:
                # Never let cleanup mask the real task outcome.
                logger.warning(f"释放锁失败: {file_path}: {e}")

    async def _build_messages(
        self,
        agent: AgentInfo,
        task: Task,
        context: ExecutionContext
    ) -> List[LLMMessage]:
        """Assemble the LLM conversation: system prompt, file context, task, extras."""
        # BUGFIX: json was used below without being imported anywhere at module
        # scope; a function-local import keeps the fix self-contained.
        import json

        messages = []
        messages.append(LLMMessage(
            role="system",
            content=self._build_system_prompt(agent, context)
        ))
        # Attach the identified files and, when readable, their contents.
        if context.required_files:
            context_info = f"\n相关文件: {', '.join(context.required_files)}\n"
            file_contents = await self._read_file_contents(context.required_files)
            if file_contents:
                context_info += f"\n文件内容:\n{file_contents}\n"
            messages.append(LLMMessage(role="user", content=context_info))
        # The task itself.
        messages.append(LLMMessage(role="user", content=task.description))
        # Optional caller-supplied context, serialised as JSON.
        if task.context:
            context_str = f"\n额外上下文:\n{json.dumps(task.context, ensure_ascii=False, indent=2)}\n"
            messages.append(LLMMessage(role="user", content=context_str))
        return messages

    def _build_system_prompt(self, agent: AgentInfo, context: ExecutionContext) -> str:
        """Compose the system prompt: common base + role-specific addendum."""
        role_prompts = {
            "architect": """
你是一个系统架构师。你擅长:
- 系统设计和模块划分
- 技术选型和架构决策
- 接口设计和数据流规划
- 性能优化和扩展性考虑
请给出清晰、完整的架构方案。
""",
            "pm": """
你是一个产品经理。你擅长:
- 需求分析和用户故事
- 功能优先级排序
- 产品规划
- 用户体验考虑
请从用户角度分析需求。
""",
            "developer": """
你是一个高级开发工程师。你擅长:
- 编写高质量、可维护的代码
- 遵循最佳实践和编码规范
- 考虑边界情况和错误处理
- 编写清晰的注释和文档
请给出可以直接使用的代码实现。
""",
            "reviewer": """
你是一个代码审查专家。你擅长:
- 发现代码中的潜在问题
- 安全漏洞检测
- 性能问题识别
- 代码风格和可读性改进
请给出详细的审查意见。
""",
            "qa": """
你是一个测试工程师。你擅长:
- 编写全面的测试用例
- 边界条件测试
- 自动化测试
- 测试策略制定
请给出完整的测试方案。
"""
        }
        base_prompt = f"""你是 {agent.name},一个 AI 编程助手。
当前任务 ID: {context.task_id}
你的角色: {agent.role}
使用的模型: {agent.model}
工作原则:
1. 仔细理解任务需求
2. 给出清晰、具体的回答
3. 如果涉及代码,确保代码正确且可运行
4. 考虑边界情况和错误处理
5. 必要时给出解释和说明
"""
        # Unknown roles simply get the base prompt.
        return base_prompt + role_prompts.get(agent.role, "")

    async def _read_file_contents(self, file_paths: List[str]) -> str:
        """Read up to three of the referenced files, truncating long ones."""
        contents = []
        for file_path in file_paths[:3]:  # cap the number of files read
            full_path = self.work_dir / file_path
            if full_path.exists():
                try:
                    with open(full_path, 'r', encoding='utf-8') as f:
                        content = f.read()
                    # Cap per-file content to keep the prompt small.
                    if len(content) > 2000:
                        content = content[:2000] + "\n... (文件过长,已截断)"
                    contents.append(f"### {file_path}\n```\n{content}\n```")
                except Exception as e:
                    logger.warning(f"读取文件失败: {file_path}: {e}")
        return "\n\n".join(contents)

    async def _call_llm(self, context: ExecutionContext) -> str:
        """Run the conversation through the model router (or the mock fallback)."""
        if not self.llm:
            # No LLM configured — return a canned response so the pipeline
            # remains testable end to end.
            return await self._mock_llm_response(context)
        response = await self.llm.route_task(
            task=context.messages[-1].content,
            messages=context.messages,
            preferred_model=context.agent_model
        )
        logger.info(f"LLM 调用完成: {response.provider}/{response.model}, "
                    f"tokens: {response.tokens_used}, latency: {response.latency:.2f}s")
        return response.content

    async def _mock_llm_response(self, context: ExecutionContext) -> str:
        """Canned LLM response used when no provider is configured (tests/dev)."""
        await asyncio.sleep(0.5)  # simulate network latency
        return f"""[模拟响应]
作为 {context.agent_role},我对任务的分析如下:
任务需要处理的文件: {', '.join(context.required_files) or ''}
## 分析
这是一个模拟响应,表示系统正在正常工作。
## 建议
1. 配置 LLM API 密钥以启用真实 AI 能力
2. 在环境变量中设置 ANTHROPIC_API_KEY 或 DEEPSEEK_API_KEY
3. 重启服务后即可使用完整功能
---
*Agent ID: {context.agent_id}*
*任务 ID: {context.task_id}*
"""

    async def _process_result(
        self,
        llm_output: str,
        context: ExecutionContext
    ) -> Result:
        """Wrap the raw LLM output in a Result with execution metadata."""
        execution_time = time.time() - context.start_time
        return Result(
            success=True,
            output=llm_output,
            metadata={
                "agent_id": context.agent_id,
                "agent_role": context.agent_role,
                "agent_model": context.agent_model,
                "task_id": context.task_id,
                "required_files": context.required_files,
                "acquired_locks": context.acquired_locks,
                "execution_time": execution_time
            },
            execution_time=execution_time
        )

    async def create_execution_plan(
        self,
        agent: AgentInfo,
        task: str
    ) -> ExecutionPlan:
        """
        Ask the LLM to turn a task description into an ExecutionPlan.

        Falls back to a mock plan when no LLM is configured or the response
        cannot be parsed as JSON.
        """
        if not self.llm:
            return self._create_mock_plan(task)
        plan_prompt = f"""
请分析以下任务,生成执行计划。
任务: {task}
请返回 JSON 格式的执行计划,包含:
{{
"steps": ["步骤1", "步骤2", ...],
"required_files": ["file1.py", "file2.js", ...],
"estimated_duration": "预计时间",
"complexity": "simple|medium|complex",
"requires_code_execution": true/false,
"subtasks": ["子任务1", "子任务2", ...]
}}
"""
        try:
            response = await self.llm.route_task(
                task=plan_prompt,
                messages=[LLMMessage(role="user", content=plan_prompt)]
            )
            import json
            # BUGFIX: route_task returns an LLMResponse object (see _call_llm),
            # not a string — parse its .content payload. The previous
            # json.loads(response) always raised and fell through to the mock.
            plan_data = json.loads(response.content)
            return ExecutionPlan(
                steps=plan_data.get("steps", []),
                required_files=plan_data.get("required_files", []),
                estimated_duration=plan_data.get("estimated_duration", ""),
                complexity=plan_data.get("complexity", "medium"),
                requires_code_execution=plan_data.get("requires_code_execution", False),
                subtasks=plan_data.get("subtasks", [])
            )
        except Exception as e:
            logger.warning(f"解析执行计划失败: {e}")
            return self._create_mock_plan(task)

    def _create_mock_plan(self, task: str) -> ExecutionPlan:
        """Generic fallback plan used when the LLM is unavailable."""
        return ExecutionPlan(
            steps=[
                "1. 分析任务需求",
                "2. 查看相关文件",
                "3. 制定实现方案",
                "4. 执行实现"
            ],
            estimated_duration="5-10 分钟",
            complexity="medium"
        )
# Singleton accessor
_executor: Optional[AgentExecutor] = None


def get_agent_executor(llm_service: ModelRouter = None) -> AgentExecutor:
    """Return the shared AgentExecutor, building it on the first call.

    Note: `llm_service` only takes effect on the very first call; subsequent
    calls return the cached instance unchanged.
    """
    global _executor
    if _executor is not None:
        return _executor
    _executor = AgentExecutor(llm_service=llm_service)
    return _executor
def reset_agent_executor():
    """Drop the cached executor so the next get_agent_executor() builds a fresh one (test helper)."""
    global _executor
    _executor = None

View File

@@ -0,0 +1,261 @@
"""
Agent 注册服务 - 管理 Agent 的注册信息和状态
每个 Agent 有独立的目录存储其配置和状态
"""
import asyncio
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional
from dataclasses import dataclass, asdict
from enum import Enum
from .storage import get_storage
class AgentRole(str, Enum):
    """Enumeration of agent roles within the swarm."""
    ARCHITECT = "architect"
    PRODUCT_MANAGER = "pm"
    DEVELOPER = "developer"
    QA = "qa"
    REVIEWER = "reviewer"
    HUMAN = "human"  # a human participant acting as an agent
@dataclass
class AgentInfo:
    """Static registration info for one agent (persisted to info.json)."""
    agent_id: str  # unique identifier, e.g. claude-001
    name: str  # display name, e.g. Claude Code
    role: str  # role: architect, pm, developer, qa, reviewer, human
    model: str  # backing model: claude-opus-4.6, gpt-4o, human, etc.
    description: str = ""  # free-form description
    created_at: str = ""  # ISO registration timestamp, stamped on creation
    status: str = "idle"  # current status

    def __post_init__(self):
        # Stamp the registration time when the caller did not supply one.
        if not self.created_at:
            self.created_at = datetime.now().isoformat()
@dataclass
class AgentState:
    """Runtime state of an agent (persisted to state.json)."""
    agent_id: str
    current_task: str = ""  # description of the task in progress
    progress: int = 0  # completion percentage, 0-100
    # None sentinel avoids the shared-mutable-default pitfall; normalised below.
    working_files: Optional[List[str]] = None
    last_update: str = ""  # ISO timestamp, stamped on creation

    def __post_init__(self):
        if self.working_files is None:
            self.working_files = []
        if not self.last_update:
            self.last_update = datetime.now().isoformat()
class AgentRegistry:
    """
    Agent registry.

    Stores each agent's static info (info.json) and runtime state (state.json)
    under agents/<agent_id>/ via the storage service.
    """

    def __init__(self):
        self._storage = get_storage()
        self._lock = asyncio.Lock()  # serialises read-modify-write cycles

    def _get_agent_dir(self, agent_id: str) -> str:
        """Directory (relative to the storage root) holding one agent's files."""
        return f"agents/{agent_id}"

    def _get_agent_info_file(self, agent_id: str) -> str:
        """Path of an agent's static info file."""
        return f"{self._get_agent_dir(agent_id)}/info.json"

    def _get_agent_state_file(self, agent_id: str) -> str:
        """Path of an agent's runtime state file."""
        return f"{self._get_agent_dir(agent_id)}/state.json"

    async def register_agent(
        self,
        agent_id: str,
        name: str,
        role: str,
        model: str,
        description: str = ""
    ) -> AgentInfo:
        """
        Register a new agent and initialise its state file.

        Args:
            agent_id: unique agent id.
            name: display name.
            role: agent role.
            model: backing model identifier.
            description: optional free-form description.

        Returns:
            The stored AgentInfo.
        """
        async with self._lock:
            agent_info = AgentInfo(
                agent_id=agent_id,
                name=name,
                role=role,
                model=model,
                description=description
            )
            # Make sure the agent's directory exists before writing into it.
            await self._storage.ensure_dir(self._get_agent_dir(agent_id))
            await self._storage.write_json(
                self._get_agent_info_file(agent_id),
                asdict(agent_info)
            )
            # Fresh agents start with an empty runtime state.
            await self._storage.write_json(
                self._get_agent_state_file(agent_id),
                asdict(AgentState(agent_id=agent_id))
            )
            return agent_info

    async def get_agent(self, agent_id: str) -> Optional[AgentInfo]:
        """Return the AgentInfo for agent_id, or None when unregistered."""
        data = await self._storage.read_json(self._get_agent_info_file(agent_id))
        return AgentInfo(**data) if data else None

    async def list_agents(self) -> List[AgentInfo]:
        """Return every registered agent; directories without info.json are skipped."""
        agents: List[AgentInfo] = []
        agents_dir = Path(self._storage.base_path) / "agents"
        if not agents_dir.exists():
            return []
        for agent_dir in agents_dir.iterdir():
            if agent_dir.is_dir() and (agent_dir / "info.json").exists():
                data = await self._storage.read_json(f"agents/{agent_dir.name}/info.json")
                if data:
                    agents.append(AgentInfo(**data))
        return agents

    async def update_state(
        self,
        agent_id: str,
        task: str = "",
        progress: int = 0,
        working_files: List[str] = None
    ) -> None:
        """
        Update an agent's runtime state.

        Falsy arguments (empty task, progress 0, empty/None file list) keep
        the previously stored value, so partial updates are possible — note
        this also means progress cannot be explicitly reset to 0 here.
        """
        async with self._lock:
            state_file = self._get_agent_state_file(agent_id)
            # BUGFIX: read_json returns a falsy value for a missing state file;
            # coerce to {} so the .get() fallbacks below cannot crash.
            existing = await self._storage.read_json(state_file) or {}
            state = AgentState(
                agent_id=agent_id,
                current_task=task or existing.get("current_task", ""),
                progress=progress or existing.get("progress", 0),
                working_files=working_files or existing.get("working_files", []),
                last_update=datetime.now().isoformat()
            )
            await self._storage.write_json(state_file, asdict(state))

    async def get_state(self, agent_id: str) -> Optional[AgentState]:
        """Return the AgentState for agent_id, or None when absent."""
        data = await self._storage.read_json(self._get_agent_state_file(agent_id))
        return AgentState(**data) if data else None

    async def unregister_agent(self, agent_id: str) -> bool:
        """
        Unregister an agent.

        Only info.json and state.json are deleted; a full implementation
        would remove the agent directory recursively.

        Returns:
            True on success, False when the agent is unknown.
        """
        async with self._lock:
            if not await self.get_agent(agent_id):
                return False
            agent_dir = self._get_agent_dir(agent_id)
            await self._storage.delete(f"{agent_dir}/info.json")
            await self._storage.delete(f"{agent_dir}/state.json")
            return True

    async def get_agents_by_role(self, role: str) -> List[AgentInfo]:
        """Return all registered agents whose role equals `role`."""
        return [agent for agent in await self.list_agents() if agent.role == role]
# Module-level singleton
_registry_instance: Optional[AgentRegistry] = None


def get_agent_registry() -> AgentRegistry:
    """Return the process-wide AgentRegistry, creating it on first use."""
    global _registry_instance
    if _registry_instance is not None:
        return _registry_instance
    _registry_instance = AgentRegistry()
    return _registry_instance

View File

@@ -0,0 +1,115 @@
"""
文件锁服务 - 管理 Agent 对文件的访问锁
"""
import asyncio
from datetime import datetime
from typing import Dict, List, Optional
from dataclasses import dataclass, asdict
from .storage import get_storage
@dataclass
class LockInfo:
    """Metadata describing a single file lock held by an agent."""
    file_path: str  # path of the locked file
    agent_id: str  # id of the owning agent
    acquired_at: str  # ISO-format acquisition timestamp
    agent_name: str = ""  # optional display name of the owner

    @property
    def elapsed_seconds(self) -> int:
        """Whole seconds elapsed since the lock was acquired."""
        delta = datetime.now() - datetime.fromisoformat(self.acquired_at)
        return int(delta.total_seconds())

    @property
    def elapsed_display(self) -> str:
        """Human-readable age of the lock, e.g. '42s ago' or '3m 07s ago'."""
        total = self.elapsed_seconds
        if total < 60:
            return f"{total}s ago"
        minutes, secs = divmod(total, 60)
        return f"{minutes}m {secs:02d}s ago"
class FileLockService:
    """
    File-lock service: serialises agent access to shared files.

    Locks live as a JSON map in LOCKS_FILE and expire automatically after
    LOCK_TIMEOUT seconds so a crashed agent cannot hold a file forever.
    """
    LOCKS_FILE = "cache/file_locks.json"
    LOCK_TIMEOUT = 300  # seconds before a lock is considered stale

    def __init__(self):
        self._storage = get_storage()
        self._lock = asyncio.Lock()  # guards read-modify-write cycles in this process

    async def _load_locks(self) -> Dict[str, Dict]:
        """Load the lock map; a missing file yields {}."""
        # BUGFIX: read_json can return a falsy value before the file exists;
        # coerce to {} so callers can always treat the result as a dict.
        return await self._storage.read_json(self.LOCKS_FILE) or {}

    async def _save_locks(self, locks: Dict[str, Dict]) -> None:
        """Persist the lock map."""
        await self._storage.write_json(self.LOCKS_FILE, locks)

    def _is_expired(self, lock_data: Dict) -> bool:
        """True when the lock is older than LOCK_TIMEOUT."""
        acquired_at = datetime.fromisoformat(lock_data["acquired_at"])
        return (datetime.now() - acquired_at).total_seconds() >= self.LOCK_TIMEOUT

    async def _cleanup_expired(self, locks: Dict[str, Dict]) -> Dict[str, Dict]:
        """Drop every expired lock from the given map."""
        return {k: v for k, v in locks.items() if not self._is_expired(v)}

    async def acquire_lock(self, file_path: str, agent_id: str, agent_name: str = "") -> bool:
        """Acquire (or re-acquire) a lock; False when another agent holds it."""
        async with self._lock:
            locks = await self._cleanup_expired(await self._load_locks())
            if file_path in locks and locks[file_path]["agent_id"] != agent_id:
                return False
            # Re-acquiring our own lock refreshes its timestamp.
            locks[file_path] = asdict(LockInfo(
                file_path=file_path,
                agent_id=agent_id,
                acquired_at=datetime.now().isoformat(),
                agent_name=agent_name
            ))
            await self._save_locks(locks)
            return True

    async def release_lock(self, file_path: str, agent_id: str) -> bool:
        """Release a lock; False when it is absent or owned by someone else."""
        async with self._lock:
            locks = await self._load_locks()
            if file_path not in locks or locks[file_path]["agent_id"] != agent_id:
                return False
            del locks[file_path]
            await self._save_locks(locks)
            return True

    async def get_locks(self) -> List[LockInfo]:
        """Return every non-expired lock."""
        locks = await self._cleanup_expired(await self._load_locks())
        return [LockInfo(**data) for data in locks.values()]

    async def check_locked(self, file_path: str) -> Optional[str]:
        """Return the owning agent's id, or None when the file is unlocked."""
        locks = await self._cleanup_expired(await self._load_locks())
        return locks.get(file_path, {}).get("agent_id")

    async def get_agent_locks(self, agent_id: str) -> List[LockInfo]:
        """Return every lock currently held by agent_id."""
        return [lock for lock in await self.get_locks() if lock.agent_id == agent_id]

    async def release_all_agent_locks(self, agent_id: str) -> int:
        """Release every lock held by agent_id; returns how many were released."""
        async with self._lock:
            locks = await self._load_locks()
            to_remove = [k for k, v in locks.items() if v["agent_id"] == agent_id]
            for k in to_remove:
                del locks[k]
            await self._save_locks(locks)
            return len(to_remove)
# Module-level singleton
_file_lock_service: Optional[FileLockService] = None


def get_file_lock_service() -> FileLockService:
    """Return the process-wide FileLockService, creating it on first use."""
    global _file_lock_service
    if _file_lock_service is not None:
        return _file_lock_service
    _file_lock_service = FileLockService()
    return _file_lock_service

View File

@@ -0,0 +1,212 @@
"""
心跳服务 - 管理 Agent 心跳和超时检测
用于监控 Agent 活跃状态和检测掉线 Agent
"""
import asyncio
from datetime import datetime, timedelta
from typing import Dict, List, Optional
from dataclasses import dataclass, asdict
from .storage import get_storage
@dataclass
class HeartbeatInfo:
    """A single agent's most recent heartbeat record."""
    agent_id: str
    last_heartbeat: str  # ISO-format timestamp of the last heartbeat
    status: str  # working | waiting | idle | error
    current_task: str = ""  # short description of the task in progress
    progress: int = 0  # completion percentage, 0-100

    @property
    def elapsed_seconds(self) -> int:
        """Whole seconds since the last heartbeat was recorded."""
        delta = datetime.now() - datetime.fromisoformat(self.last_heartbeat)
        return int(delta.total_seconds())

    def is_timeout(self, timeout_seconds: int = 60) -> bool:
        """True when the agent has been silent longer than timeout_seconds."""
        return self.elapsed_seconds > timeout_seconds

    @property
    def elapsed_display(self) -> str:
        """Compact human-readable age: '3s ago' (<10s), '45s' (<60s), else '2m 05s'."""
        total = self.elapsed_seconds
        if total < 10:
            return f"{total}s ago"
        if total < 60:
            return f"{total}s"
        minutes, secs = divmod(total, 60)
        return f"{minutes}m {secs:02d}s"
class HeartbeatService:
    """
    Heartbeat service.

    Keeps one heartbeat record per agent in HEARTBEATS_FILE and answers
    liveness questions: timeouts, recent activity, per-status queries.
    """
    HEARTBEATS_FILE = "cache/heartbeats.json"
    DEFAULT_TIMEOUT = 60  # seconds of silence before an agent counts as timed out

    def __init__(self):
        self._storage = get_storage()
        self._lock = asyncio.Lock()  # guards read-modify-write cycles

    async def _load_heartbeats(self) -> Dict[str, Dict]:
        """Load all heartbeat records; a missing file yields {}."""
        # BUGFIX: read_json can return a falsy value before the file exists;
        # coerce to {} so callers can always treat the result as a dict.
        return await self._storage.read_json(self.HEARTBEATS_FILE) or {}

    async def _save_heartbeats(self, heartbeats: Dict[str, Dict]) -> None:
        """Persist the heartbeat map."""
        await self._storage.write_json(self.HEARTBEATS_FILE, heartbeats)

    async def update_heartbeat(
        self,
        agent_id: str,
        status: str,
        current_task: str = "",
        progress: int = 0
    ) -> None:
        """
        Record a fresh heartbeat for agent_id.

        Args:
            agent_id: the reporting agent.
            status: working | waiting | idle | error.
            current_task: short description of the task in progress.
            progress: completion percentage, 0-100.
        """
        async with self._lock:
            heartbeats = await self._load_heartbeats()
            heartbeats[agent_id] = asdict(HeartbeatInfo(
                agent_id=agent_id,
                last_heartbeat=datetime.now().isoformat(),
                status=status,
                current_task=current_task,
                progress=progress
            ))
            await self._save_heartbeats(heartbeats)

    async def get_heartbeat(self, agent_id: str) -> Optional[HeartbeatInfo]:
        """Return the heartbeat record for agent_id, or None when unknown."""
        data = (await self._load_heartbeats()).get(agent_id)
        return HeartbeatInfo(**data) if data else None

    async def get_all_heartbeats(self) -> Dict[str, HeartbeatInfo]:
        """Return every heartbeat record keyed by agent id."""
        heartbeats = await self._load_heartbeats()
        return {agent_id: HeartbeatInfo(**data) for agent_id, data in heartbeats.items()}

    async def check_timeout(self, timeout_seconds: int = None) -> List[str]:
        """
        Return ids of agents whose last heartbeat is older than timeout_seconds
        (DEFAULT_TIMEOUT when omitted).
        """
        if timeout_seconds is None:
            timeout_seconds = self.DEFAULT_TIMEOUT
        all_heartbeats = await self.get_all_heartbeats()
        return [
            agent_id for agent_id, heartbeat in all_heartbeats.items()
            if heartbeat.is_timeout(timeout_seconds)
        ]

    async def remove_heartbeat(self, agent_id: str) -> bool:
        """Delete agent_id's heartbeat record; False when it does not exist."""
        async with self._lock:
            heartbeats = await self._load_heartbeats()
            if agent_id not in heartbeats:
                return False
            del heartbeats[agent_id]
            await self._save_heartbeats(heartbeats)
            return True

    async def get_active_agents(self, within_seconds: int = 120) -> List[str]:
        """Return ids of agents that reported within the last `within_seconds`."""
        all_heartbeats = await self.get_all_heartbeats()
        return [
            agent_id for agent_id, heartbeat in all_heartbeats.items()
            if heartbeat.elapsed_seconds <= within_seconds
        ]

    async def get_agents_by_status(self, status: str) -> List[HeartbeatInfo]:
        """Return every heartbeat record whose status equals `status`."""
        all_heartbeats = await self.get_all_heartbeats()
        return [hb for hb in all_heartbeats.values() if hb.status == status]
# Module-level singleton
_heartbeat_service_instance: Optional[HeartbeatService] = None


def get_heartbeat_service() -> HeartbeatService:
    """Return the process-wide HeartbeatService, creating it on first use."""
    global _heartbeat_service_instance
    if _heartbeat_service_instance is not None:
        return _heartbeat_service_instance
    _heartbeat_service_instance = HeartbeatService()
    return _heartbeat_service_instance

View File

@@ -0,0 +1,378 @@
"""
人类输入服务 - 管理人类参与者的任务请求和会议评论
"""
import asyncio
import uuid
from datetime import datetime
from typing import Dict, List, Optional
from dataclasses import dataclass, field, asdict
from .storage import get_storage
@dataclass
class TaskRequest:
    """A task submitted by a human participant."""
    id: str
    from_user: str  # submitter's user id (serialised as "from" in humans.json)
    timestamp: str  # ISO submission time
    priority: str  # high | medium | low
    type: str  # task category
    title: str = ""
    content: str = ""
    target_files: List[str] = field(default_factory=list)
    suggested_agent: str = ""  # agent the submitter suggests for the work
    urgent: bool = False
    status: str = "pending"  # pending | processing | completed | rejected

    @property
    def is_urgent(self) -> bool:
        """True only for tasks that are both flagged urgent and high-priority."""
        return self.urgent and self.priority == "high"
@dataclass
class MeetingComment:
    """A comment left by a human participant on a meeting."""
    id: str
    from_user: str  # submitter's user id (serialised as "from" in humans.json)
    meeting_id: str
    timestamp: str
    type: str  # proposal | question | correction
    priority: str = "normal"
    content: str = ""
    status: str = "pending"  # pending | addressed | ignored
@dataclass
class HumanParticipant:
    """Profile of a human taking part in the swarm."""
    id: str
    name: str
    role: str = ""  # tech_lead | product_owner | developer
    status: str = "offline"  # online | offline | busy
    avatar: str = "👤"
class HumanInputService:
    """
    Human-input service.

    Owns the humans.json document: registered human participants, their task
    requests, and their meeting comments.
    """
    HUMANS_FILE = "humans.json"
    # Weights used to order task requests (higher is served first).
    PRIORITY_WEIGHT = {
        "high": 3,
        "medium": 2,
        "low": 1,
        "normal": 0
    }

    def __init__(self):
        self._storage = get_storage()
        self._lock = asyncio.Lock()  # guards read-modify-write cycles on humans.json

    async def _load_humans(self) -> Dict:
        """Load humans.json; a missing/empty file yields {} so callers may .get()."""
        # BUGFIX: read_json can return a falsy value before the file exists,
        # which previously crashed the mark_* methods (they skip
        # _ensure_structure and called .get() on None).
        return await self._storage.read_json(self.HUMANS_FILE) or {}

    async def _save_humans(self, data: Dict) -> None:
        """Persist humans.json."""
        await self._storage.write_json(self.HUMANS_FILE, data)

    async def _ensure_structure(self) -> None:
        """Create the skeleton humans.json document if it does not exist yet."""
        async with self._lock:
            data = await self._load_humans()
            if not data:
                data = {
                    "version": "1.0",
                    "last_updated": datetime.now().isoformat(),
                    "participants": {},
                    "task_requests": [],
                    "meeting_comments": [],
                    "human_states": {}
                }
                await self._save_humans(data)

    async def register_participant(
        self,
        user_id: str,
        name: str,
        role: str = "",
        avatar: str = "👤"
    ) -> None:
        """Register (or overwrite) a human participant's profile."""
        await self._ensure_structure()
        async with self._lock:
            data = await self._load_humans()
            data["participants"][user_id] = asdict(HumanParticipant(
                id=user_id,
                name=name,
                role=role,
                avatar=avatar
            ))
            data["last_updated"] = datetime.now().isoformat()
            await self._save_humans(data)

    async def add_task_request(
        self,
        from_user: str,
        content: str,
        priority: str = "medium",
        task_type: str = "user_task",
        title: str = "",
        target_files: List[str] = None,
        suggested_agent: str = "",
        urgent: bool = False
    ) -> str:
        """
        Append a task request to humans.json.

        Returns:
            The generated task id.
        """
        await self._ensure_structure()
        async with self._lock:
            data = await self._load_humans()
            task_id = f"req_{uuid.uuid4().hex[:8]}"
            task = TaskRequest(
                id=task_id,
                from_user=from_user,
                timestamp=datetime.now().isoformat(),
                priority=priority,
                type=task_type,
                title=title,
                content=content,
                target_files=target_files or [],
                suggested_agent=suggested_agent,
                urgent=urgent
            )
            # JSON schema uses "from"; the dataclass field is from_user because
            # "from" is a Python keyword.
            task_dict = asdict(task)
            task_dict["from"] = task_dict.pop("from_user")
            data["task_requests"].append(task_dict)
            data["last_updated"] = datetime.now().isoformat()
            await self._save_humans(data)
            return task_id

    async def get_pending_tasks(
        self,
        priority_filter: str = None,
        agent_filter: str = None
    ) -> List[TaskRequest]:
        """
        Return pending task requests, highest priority first.

        Args:
            priority_filter: keep only this priority when given.
            agent_filter: keep only tasks suggesting this agent when given.
        """
        await self._ensure_structure()
        data = await self._load_humans()
        tasks = []
        for t in data.get("task_requests", []):
            if t["status"] != "pending":
                continue
            if priority_filter and t["priority"] != priority_filter:
                continue
            if agent_filter and t.get("suggested_agent") != agent_filter:
                continue
            # Translate the JSON key back: "from" -> from_user.
            t_dict = t.copy()
            t_dict["from_user"] = t_dict.pop("from", "")
            tasks.append(TaskRequest(**t_dict))
        # Order: priority weight desc, urgent flag desc, then oldest first.
        tasks.sort(
            key=lambda t: (
                -self.PRIORITY_WEIGHT.get(t.priority, 0),
                -t.urgent,
                t.timestamp
            )
        )
        return tasks

    async def get_urgent_tasks(self) -> List[TaskRequest]:
        """Return pending tasks that are both high-priority and flagged urgent."""
        return [t for t in await self.get_pending_tasks() if t.is_urgent]

    async def _set_task_status(self, task_id: str, status: str) -> bool:
        """Set the status of one task request; False when the id is unknown."""
        async with self._lock:
            data = await self._load_humans()
            for task in data.get("task_requests", []):
                if task["id"] == task_id:
                    task["status"] = status
                    data["last_updated"] = datetime.now().isoformat()
                    await self._save_humans(data)
                    return True
            return False

    async def mark_task_processing(self, task_id: str) -> bool:
        """Mark a task request as being worked on."""
        return await self._set_task_status(task_id, "processing")

    async def mark_task_completed(self, task_id: str) -> bool:
        """Mark a task request as finished."""
        return await self._set_task_status(task_id, "completed")

    async def add_meeting_comment(
        self,
        from_user: str,
        meeting_id: str,
        content: str,
        comment_type: str = "proposal",
        priority: str = "normal"
    ) -> str:
        """
        Append a meeting comment to humans.json.

        Returns:
            The generated comment id.
        """
        await self._ensure_structure()
        async with self._lock:
            data = await self._load_humans()
            comment_id = f"comment_{uuid.uuid4().hex[:8]}"
            comment = MeetingComment(
                id=comment_id,
                from_user=from_user,
                meeting_id=meeting_id,
                timestamp=datetime.now().isoformat(),
                type=comment_type,
                priority=priority,
                content=content
            )
            # JSON schema uses "from"; see add_task_request.
            comment_dict = asdict(comment)
            comment_dict["from"] = comment_dict.pop("from_user")
            data["meeting_comments"].append(comment_dict)
            data["last_updated"] = datetime.now().isoformat()
            await self._save_humans(data)
            return comment_id

    async def get_pending_comments(self, meeting_id: str = None) -> List[MeetingComment]:
        """Return pending meeting comments, optionally for one meeting only."""
        await self._ensure_structure()
        data = await self._load_humans()
        comments = []
        for c in data.get("meeting_comments", []):
            if c["status"] != "pending":
                continue
            if meeting_id and c["meeting_id"] != meeting_id:
                continue
            # Translate the JSON key back: "from" -> from_user.
            c_dict = c.copy()
            c_dict["from_user"] = c_dict.pop("from", "")
            comments.append(MeetingComment(**c_dict))
        return comments

    async def mark_comment_addressed(self, comment_id: str) -> bool:
        """Mark a meeting comment as addressed; False when the id is unknown."""
        async with self._lock:
            data = await self._load_humans()
            # BUGFIX: use .get with a default — the bare data["meeting_comments"]
            # raised KeyError on a document that predates the comments list.
            for comment in data.get("meeting_comments", []):
                if comment["id"] == comment_id:
                    comment["status"] = "addressed"
                    data["last_updated"] = datetime.now().isoformat()
                    await self._save_humans(data)
                    return True
            return False

    async def get_participants(self) -> List[HumanParticipant]:
        """Return every registered human participant."""
        await self._ensure_structure()
        data = await self._load_humans()
        return [
            HumanParticipant(**p)
            for p in data.get("participants", {}).values()
        ]

    async def update_user_status(
        self,
        user_id: str,
        status: str,
        current_focus: str = ""
    ) -> bool:
        """Update a participant's status; False when the user is unregistered."""
        await self._ensure_structure()
        async with self._lock:
            data = await self._load_humans()
            if user_id not in data.get("participants", {}):
                return False
            data["participants"][user_id]["status"] = status
            if "human_states" not in data:
                data["human_states"] = {}
            data["human_states"][user_id] = {
                "status": status,
                "current_focus": current_focus,
                "last_update": datetime.now().isoformat()
            }
            data["last_updated"] = datetime.now().isoformat()
            await self._save_humans(data)
            return True

    async def get_summary(self) -> Dict:
        """Return aggregate counts for participants, tasks and comments."""
        await self._ensure_structure()
        data = await self._load_humans()
        pending_tasks = await self.get_pending_tasks()
        urgent_tasks = await self.get_urgent_tasks()
        pending_comments = await self.get_pending_comments()
        participants = await self.get_participants()
        return {
            "participants": len(participants),
            "online_users": len([p for p in participants if p.status == "online"]),
            "pending_tasks": len(pending_tasks),
            "urgent_tasks": len(urgent_tasks),
            "pending_comments": len(pending_comments),
            "last_updated": data.get("last_updated", "")
        }
# Module-level singleton
_human_input_service: Optional[HumanInputService] = None


def get_human_input_service() -> HumanInputService:
    """Return the process-wide HumanInputService, creating it on first use."""
    global _human_input_service
    if _human_input_service is not None:
        return _human_input_service
    _human_input_service = HumanInputService()
    return _human_input_service

View File

@@ -0,0 +1,669 @@
"""
LLM 服务层
提供统一的 LLM 调用接口,支持多个提供商:
- Anthropic (Claude)
- OpenAI (GPT)
- DeepSeek
- Ollama (本地模型)
- Google (Gemini)
"""
import os
import json
import asyncio
import logging
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Any, Union
from dataclasses import dataclass
from enum import Enum
import time
logger = logging.getLogger(__name__)
class TaskType(Enum):
    """Task categories used to route work to the most suitable model."""
    COMPLEX_REASONING = "complex_reasoning"  # multi-step reasoning problems
    CODE_GENERATION = "code_generation"  # writing new code
    CODE_REVIEW = "code_review"  # reviewing existing code
    SIMPLE_TASK = "simple_task"  # small, low-stakes requests
    COST_SENSITIVE = "cost_sensitive"  # cost matters more than quality
    LOCAL_PRIVACY = "local_privacy"  # must stay on a local model
    MULTIMODAL = "multimodal"  # involves images or other media
    ARCHITECTURE_DESIGN = "architecture_design"  # system-design work
    TEST_GENERATION = "test_generation"  # producing test suites
@dataclass
class LLMMessage:
    """One chat message in an LLM conversation."""
    role: str  # "system", "user" or "assistant"
    content: str  # message text
    images: Optional[List[str]] = None  # optional image payloads for multimodal models
@dataclass
class LLMResponse:
    """Normalised result of a single LLM call, independent of provider."""
    content: str  # generated text
    model: str  # model that produced the response
    provider: str  # provider name, e.g. "anthropic"
    tokens_used: int = 0  # total tokens consumed, when reported
    finish_reason: str = ""  # provider-specific stop reason
    latency: float = 0.0  # wall-clock seconds for the call
@dataclass
class LLMConfig:
    """Configuration shared by all LLM providers.

    Defaults match the public endpoints of each vendor; `from_env()`
    overrides any field from environment variables.
    """
    # Anthropic
    anthropic_api_key: Optional[str] = None
    anthropic_base_url: str = "https://api.anthropic.com"
    # OpenAI
    openai_api_key: Optional[str] = None
    openai_base_url: str = "https://api.openai.com/v1"
    # DeepSeek
    deepseek_api_key: Optional[str] = None
    deepseek_base_url: str = "https://api.deepseek.com"
    # Google
    google_api_key: Optional[str] = None
    # Ollama
    ollama_base_url: str = "http://localhost:11434"
    # Generic settings
    default_timeout: int = 120
    max_retries: int = 3
    temperature: float = 0.7
    max_tokens: int = 4096
    @classmethod
    def from_env(cls) -> "LLMConfig":
        """Load configuration from environment variables.

        Fix: the base URLs for Anthropic/OpenAI/DeepSeek can now be
        overridden via ANTHROPIC_BASE_URL / OPENAI_BASE_URL /
        DEEPSEEK_BASE_URL (falling back to the dataclass defaults);
        the previous version silently ignored those variables even
        though the fields exist.
        """
        return cls(
            anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"),
            anthropic_base_url=os.getenv("ANTHROPIC_BASE_URL", "https://api.anthropic.com"),
            openai_api_key=os.getenv("OPENAI_API_KEY"),
            openai_base_url=os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1"),
            deepseek_api_key=os.getenv("DEEPSEEK_API_KEY"),
            deepseek_base_url=os.getenv("DEEPSEEK_BASE_URL", "https://api.deepseek.com"),
            google_api_key=os.getenv("GOOGLE_API_KEY"),
            ollama_base_url=os.getenv("OLLAMA_BASE_URL", "http://localhost:11434"),
            default_timeout=int(os.getenv("LLM_TIMEOUT", "120")),
            max_retries=int(os.getenv("LLM_MAX_RETRIES", "3")),
            temperature=float(os.getenv("LLM_TEMPERATURE", "0.7")),
            max_tokens=int(os.getenv("LLM_MAX_TOKENS", "4096"))
        )
class LLMProvider(ABC):
    """Abstract base class for LLM providers.

    Concrete providers implement chat/stream completion for one vendor
    API; `_retry_with_backoff` supplies shared retry logic.
    """
    def __init__(self, config: LLMConfig):
        self.config = config
    @property
    @abstractmethod
    def provider_name(self) -> str:
        """Provider identifier, e.g. "anthropic" or "openai"."""
        pass
    @abstractmethod
    async def chat_completion(
        self,
        model: str,
        messages: List[LLMMessage],
        temperature: float = None,
        max_tokens: int = None,
        **kwargs
    ) -> LLMResponse:
        """Run a single, non-streaming chat completion."""
        pass
    @abstractmethod
    async def stream_completion(
        self,
        model: str,
        messages: List[LLMMessage],
        **kwargs
    ):
        """Async generator yielding completion text chunks."""
        pass
    @abstractmethod
    def get_available_models(self) -> List[str]:
        """List the model names this provider can serve."""
        pass
    async def _retry_with_backoff(self, func, *args, **kwargs):
        """Await `func` with exponential backoff (1s, 2s, 4s, ...).

        Re-raises the last error after `config.max_retries` attempts.
        NOTE(review): retries on any Exception, including presumably
        non-retryable ones (e.g. auth failures) — consider narrowing.
        """
        last_error = None
        for attempt in range(self.config.max_retries):
            try:
                return await func(*args, **kwargs)
            except Exception as e:
                last_error = e
                if attempt < self.config.max_retries - 1:
                    wait_time = 2 ** attempt
                    logger.warning(f"Attempt {attempt + 1} failed, retrying in {wait_time}s: {e}")
                    await asyncio.sleep(wait_time)
                else:
                    logger.error(f"All {self.config.max_retries} attempts failed")
                    raise last_error
class AnthropicProvider(LLMProvider):
    """Anthropic Claude provider (async SDK, imported lazily)."""
    @property
    def provider_name(self) -> str:
        return "anthropic"
    def __init__(self, config: LLMConfig):
        super().__init__(config)
        self._client = None
    def _get_client(self):
        """Lazily construct and cache the AsyncAnthropic client."""
        if self._client is None:
            try:
                import anthropic
                self._client = anthropic.AsyncAnthropic(
                    api_key=self.config.anthropic_api_key,
                    base_url=self.config.anthropic_base_url,
                    timeout=self.config.default_timeout
                )
            except ImportError:
                raise ImportError("请安装 anthropic 包: pip install anthropic")
        return self._client
    async def chat_completion(
        self,
        model: str,
        messages: List[LLMMessage],
        temperature: float = None,
        max_tokens: int = None,
        **kwargs
    ) -> LLMResponse:
        """Single chat completion via the Messages API, with retries.

        System messages are pulled out into the separate `system`
        parameter as the Anthropic API requires.
        NOTE(review): if several system messages are passed, only the
        last one is kept (each overwrites `system_message`).
        """
        start_time = time.time()
        # Anthropic takes the system prompt separately, not inside `messages`.
        system_message = ""
        user_messages = []
        for msg in messages:
            if msg.role == "system":
                system_message = msg.content
            else:
                user_messages.append({
                    "role": msg.role,
                    "content": msg.content
                })
        client = self._get_client()
        response = await self._retry_with_backoff(
            client.messages.create,
            model=model,
            system=system_message if system_message else None,
            messages=user_messages,
            temperature=temperature or self.config.temperature,
            max_tokens=max_tokens or self.config.max_tokens,
            **kwargs
        )
        latency = time.time() - start_time
        return LLMResponse(
            content=response.content[0].text,
            model=model,
            provider=self.provider_name,
            tokens_used=response.usage.input_tokens + response.usage.output_tokens,
            finish_reason=response.stop_reason,
            latency=latency
        )
    async def stream_completion(self, model: str, messages: List[LLMMessage], **kwargs):
        """Stream completion text chunks (no retry wrapper)."""
        client = self._get_client()
        system_message = ""
        user_messages = []
        for msg in messages:
            if msg.role == "system":
                system_message = msg.content
            else:
                user_messages.append({"role": msg.role, "content": msg.content})
        async with client.messages.stream(
            model=model,
            system=system_message if system_message else None,
            messages=user_messages,
            max_tokens=self.config.max_tokens,
            **kwargs
        ) as stream:
            async for text in stream.text_stream:
                yield text
    def get_available_models(self) -> List[str]:
        """Known Claude model identifiers, newest first."""
        return [
            "claude-opus-4.6",
            "claude-sonnet-4.6",
            "claude-haiku-4.6",
            "claude-3-5-sonnet-20241022",
            "claude-3-5-haiku-20241022"
        ]
class OpenAIProvider(LLMProvider):
    """OpenAI GPT provider (async SDK, imported lazily)."""
    @property
    def provider_name(self) -> str:
        return "openai"
    def __init__(self, config: LLMConfig):
        super().__init__(config)
        self._client = None
    def _get_client(self):
        """Lazily construct and cache the AsyncOpenAI client."""
        if self._client is None:
            try:
                import openai
                self._client = openai.AsyncOpenAI(
                    api_key=self.config.openai_api_key,
                    base_url=self.config.openai_base_url,
                    timeout=self.config.default_timeout
                )
            except ImportError:
                raise ImportError("请安装 openai 包: pip install openai")
        return self._client
    async def chat_completion(
        self,
        model: str,
        messages: List[LLMMessage],
        temperature: float = None,
        max_tokens: int = None,
        **kwargs
    ) -> LLMResponse:
        """Single chat completion via the Chat Completions API, with retries."""
        start_time = time.time()
        client = self._get_client()
        # OpenAI accepts system messages inline, so no role splitting needed.
        api_messages = [
            {"role": msg.role, "content": msg.content}
            for msg in messages
        ]
        response = await self._retry_with_backoff(
            client.chat.completions.create,
            model=model,
            messages=api_messages,
            temperature=temperature or self.config.temperature,
            max_tokens=max_tokens or self.config.max_tokens,
            **kwargs
        )
        latency = time.time() - start_time
        return LLMResponse(
            content=response.choices[0].message.content,
            model=model,
            provider=self.provider_name,
            tokens_used=response.usage.total_tokens,
            finish_reason=response.choices[0].finish_reason,
            latency=latency
        )
    async def stream_completion(self, model: str, messages: List[LLMMessage], **kwargs):
        """Stream completion text chunks (no retry wrapper)."""
        client = self._get_client()
        api_messages = [{"role": msg.role, "content": msg.content} for msg in messages]
        stream = await client.chat.completions.create(
            model=model,
            messages=api_messages,
            stream=True,
            **kwargs
        )
        async for chunk in stream:
            if chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content
    def get_available_models(self) -> List[str]:
        """Known GPT model identifiers."""
        return [
            "gpt-4o",
            "gpt-4o-mini",
            "gpt-4-turbo",
            "gpt-3.5-turbo"
        ]
class DeepSeekProvider(LLMProvider):
    """DeepSeek provider — uses the OpenAI-compatible SDK against
    DeepSeek's base URL."""
    @property
    def provider_name(self) -> str:
        return "deepseek"
    def __init__(self, config: LLMConfig):
        super().__init__(config)
        self._client = None
    def _get_client(self):
        """Lazily construct an AsyncOpenAI client pointed at DeepSeek."""
        if self._client is None:
            try:
                import openai
                self._client = openai.AsyncOpenAI(
                    api_key=self.config.deepseek_api_key,
                    base_url=self.config.deepseek_base_url,
                    timeout=self.config.default_timeout
                )
            except ImportError:
                raise ImportError("请安装 openai 包: pip install openai")
        return self._client
    async def chat_completion(
        self,
        model: str,
        messages: List[LLMMessage],
        temperature: float = None,
        max_tokens: int = None,
        **kwargs
    ) -> LLMResponse:
        """Single chat completion via DeepSeek's OpenAI-compatible API."""
        start_time = time.time()
        client = self._get_client()
        api_messages = [{"role": msg.role, "content": msg.content} for msg in messages]
        response = await self._retry_with_backoff(
            client.chat.completions.create,
            model=model,
            messages=api_messages,
            temperature=temperature or self.config.temperature,
            max_tokens=max_tokens or self.config.max_tokens,
            **kwargs
        )
        latency = time.time() - start_time
        return LLMResponse(
            content=response.choices[0].message.content,
            model=model,
            provider=self.provider_name,
            tokens_used=response.usage.total_tokens,
            finish_reason=response.choices[0].finish_reason,
            latency=latency
        )
    async def stream_completion(self, model: str, messages: List[LLMMessage], **kwargs):
        """Stream completion text chunks (no retry wrapper)."""
        client = self._get_client()
        api_messages = [{"role": msg.role, "content": msg.content} for msg in messages]
        stream = await client.chat.completions.create(
            model=model,
            messages=api_messages,
            stream=True,
            **kwargs
        )
        async for chunk in stream:
            if chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content
    def get_available_models(self) -> List[str]:
        """Known DeepSeek model identifiers."""
        return [
            "deepseek-chat",
            "deepseek-coder"
        ]
class OllamaProvider(LLMProvider):
    """Ollama local-model provider, talking HTTP via aiohttp.

    Unlike the SDK-based providers, calls here do NOT go through
    `_retry_with_backoff`.
    """
    def __init__(self, config: LLMConfig):
        super().__init__(config)
        self._base_url = config.ollama_base_url
    @property
    def provider_name(self) -> str:
        return "ollama"
    async def chat_completion(
        self,
        model: str,
        messages: List[LLMMessage],
        temperature: float = None,
        max_tokens: int = None,
        **kwargs
    ) -> LLMResponse:
        """Single chat completion via Ollama's /api/chat endpoint."""
        import aiohttp
        start_time = time.time()
        api_messages = [{"role": msg.role, "content": msg.content} for msg in messages]
        payload = {
            "model": model,
            "messages": api_messages,
            "stream": False,
            "options": {
                "temperature": temperature or self.config.temperature,
                "num_predict": max_tokens or self.config.max_tokens
            }
        }
        async with aiohttp.ClientSession() as session:
            async with session.post(
                f"{self._base_url}/api/chat",
                json=payload,
                timeout=self.config.default_timeout
            ) as response:
                result = await response.json()
        latency = time.time() - start_time
        return LLMResponse(
            content=result.get("message", {}).get("content", ""),
            model=model,
            provider=self.provider_name,
            # Ollama reports prompt and generation token counts separately.
            tokens_used=result.get("prompt_eval_count", 0) + result.get("eval_count", 0),
            latency=latency
        )
    async def stream_completion(self, model: str, messages: List[LLMMessage], **kwargs):
        """Stream completion text chunks from Ollama's NDJSON stream.

        NOTE(review): assumes every line yielded by the stream is a
        complete JSON object; a blank or partial line would make
        json.loads raise — confirm Ollama's line framing.
        """
        import aiohttp
        api_messages = [{"role": msg.role, "content": msg.content} for msg in messages]
        payload = {
            "model": model,
            "messages": api_messages,
            "stream": True
        }
        async with aiohttp.ClientSession() as session:
            async with session.post(
                f"{self._base_url}/api/chat",
                json=payload
            ) as response:
                async for line in response.content:
                    if line:
                        data = json.loads(line)
                        if "message" in data:
                            yield data["message"].get("content", "")
    def get_available_models(self) -> List[str]:
        """Common local model names (availability depends on what is pulled)."""
        return ["llama3", "llama3.2", "mistral", "codellama", "deepseek-coder"]
class ModelRouter:
    """
    Smart model router.

    Classifies a task by keywords and dispatches it to the most
    suitable available provider/model.
    """
    # Default routing rules: TaskType -> (provider_name, model_name)
    ROUTING_RULES = {
        TaskType.COMPLEX_REASONING: ("anthropic", "claude-opus-4.6"),
        TaskType.CODE_GENERATION: ("anthropic", "claude-sonnet-4.6"),
        TaskType.CODE_REVIEW: ("anthropic", "claude-sonnet-4.6"),
        TaskType.ARCHITECTURE_DESIGN: ("anthropic", "claude-opus-4.6"),
        TaskType.TEST_GENERATION: ("anthropic", "claude-sonnet-4.6"),
        TaskType.SIMPLE_TASK: ("anthropic", "claude-haiku-4.6"),
        TaskType.COST_SENSITIVE: ("deepseek", "deepseek-chat"),
        TaskType.LOCAL_PRIVACY: ("ollama", "llama3"),
    }
    def __init__(self, config: LLMConfig = None):
        self.config = config or LLMConfig.from_env()
        self.providers: Dict[str, LLMProvider] = {}
        self._initialize_providers()
    def _initialize_providers(self):
        """Register every provider whose API key is configured."""
        if self.config.anthropic_api_key:
            self.providers["anthropic"] = AnthropicProvider(self.config)
        if self.config.openai_api_key:
            self.providers["openai"] = OpenAIProvider(self.config)
        if self.config.deepseek_api_key:
            self.providers["deepseek"] = DeepSeekProvider(self.config)
        # Ollama is always registered (local service, no API key).
        self.providers["ollama"] = OllamaProvider(self.config)
    def classify_task(self, task_description: str) -> TaskType:
        """
        Classify a task description into a TaskType.
        Uses keyword matching with simple scoring (Chinese keywords
        plus "review"); falls back to SIMPLE_TASK on no match.
        """
        task_lower = task_description.lower()
        # Keyword table per task type
        keywords_map = {
            TaskType.ARCHITECTURE_DESIGN: ["架构", "设计", "系统设计", "技术选型", "架构图"],
            TaskType.CODE_GENERATION: ["实现", "编写", "生成代码", "开发", "创建函数"],
            TaskType.CODE_REVIEW: ["审查", "review", "检查", "分析代码"],
            TaskType.TEST_GENERATION: ["测试", "单元测试", "测试用例"],
            TaskType.COMPLEX_REASONING: ["分析", "推理", "判断", "复杂", "评估"],
        }
        # Score each candidate type by number of keyword hits
        scores = {}
        for task_type, keywords in keywords_map.items():
            score = sum(1 for kw in keywords if kw in task_lower)
            if score > 0:
                scores[task_type] = score
        # Highest-scoring type wins (ties resolved by dict order)
        if scores:
            return max(scores, key=scores.get)
        # Default: treat as a simple task
        return TaskType.SIMPLE_TASK
    def get_route(self, task_type: TaskType, preferred_provider: str = None) -> tuple:
        """
        Resolve a routing decision.
        Returns: (provider_name, model_name)
        Order: preferred provider (its first model) -> routing rule ->
        first provider that lists any model; raises if none available.
        """
        # If a provider was explicitly requested and is available, use it
        if preferred_provider and preferred_provider in self.providers:
            provider = self.providers[preferred_provider]
            models = provider.get_available_models()
            if models:
                return preferred_provider, models[0]
        # Apply the static routing rules
        if task_type in self.ROUTING_RULES:
            provider_name, model_name = self.ROUTING_RULES[task_type]
            if provider_name in self.providers:
                return provider_name, model_name
        # Fall back to the first available provider
        for provider_name, provider in self.providers.items():
            models = provider.get_available_models()
            if models:
                return provider_name, models[0]
        raise RuntimeError("没有可用的 LLM 提供商")
    async def route_task(
        self,
        task: str,
        messages: List[LLMMessage] = None,
        preferred_model: str = None,
        preferred_provider: str = None,
        **kwargs
    ) -> LLMResponse:
        """
        Route a task to a suitable model and run the completion.
        Args:
            task: task description
            messages: message list (auto-created from `task` when None)
            preferred_model: preferred model name
            preferred_provider: preferred provider name
        NOTE(review): a preferred_model without a "-" in its name is
        silently ignored and falls through to keyword routing — confirm
        that is intended.
        """
        # If a preferred model was given, try to use it directly
        if preferred_model:
            if "-" in preferred_model:
                # Infer the provider from the model-name prefix
                if preferred_model.startswith("claude"):
                    provider_name = "anthropic"
                elif preferred_model.startswith("gpt"):
                    provider_name = "openai"
                elif preferred_model.startswith("deepseek"):
                    provider_name = "deepseek"
                else:
                    provider_name = preferred_provider or "anthropic"
                if provider_name in self.providers:
                    provider = self.providers[provider_name]
                    return await provider.chat_completion(
                        preferred_model,
                        messages or [LLMMessage(role="user", content=task)],
                        **kwargs
                    )
        # Otherwise classify the task and route by rules
        task_type = self.classify_task(task)
        provider_name, model_name = self.get_route(task_type, preferred_provider)
        logger.info(f"路由任务: {task_type.value} -> {provider_name}/{model_name}")
        provider = self.providers[provider_name]
        return await provider.chat_completion(
            model_name,
            messages or [LLMMessage(role="user", content=task)],
            **kwargs
        )
    def get_available_providers(self) -> List[str]:
        """Names of all registered providers."""
        return list(self.providers.keys())
    def get_provider_models(self, provider_name: str) -> List[str]:
        """Models of a given provider, or [] if the provider is unknown."""
        if provider_name in self.providers:
            return self.providers[provider_name].get_available_models()
        return []
# Module-level singleton accessor
_llm_service: Optional[ModelRouter] = None
def get_llm_service(config: LLMConfig = None) -> ModelRouter:
    """Return the shared ModelRouter, creating it on first use."""
    global _llm_service
    if _llm_service is not None:
        return _llm_service
    _llm_service = ModelRouter(config or LLMConfig.from_env())
    return _llm_service
def reset_llm_service():
    """Drop the cached router (primarily useful in tests)."""
    global _llm_service
    _llm_service = None

View File

@@ -0,0 +1,404 @@
"""
会议记录服务 - 记录会议内容、讨论和共识
将会议记录保存为 Markdown 文件,按日期组织
"""
import asyncio
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional
from dataclasses import dataclass, field, asdict
from .storage import get_storage
@dataclass
class DiscussionEntry:
    """A single utterance recorded during a meeting."""
    agent_id: str
    agent_name: str
    content: str
    timestamp: str = ""
    step: str = ""
    def __post_init__(self):
        # Default the timestamp to "now" when the caller did not supply one.
        self.timestamp = self.timestamp or datetime.now().isoformat()
@dataclass
class ProgressStep:
    """One step in a meeting agenda."""
    step_id: str  # e.g. "step_1"
    label: str  # human-readable step name
    status: str = "pending"  # pending, active, completed
    completed_at: str = ""  # ISO timestamp, set by mark_completed()
    def mark_active(self):
        """Flag this step as the one currently being worked on."""
        self.status = "active"
    def mark_completed(self):
        """Finish this step and stamp the completion time."""
        self.status = "completed"
        self.completed_at = datetime.now().isoformat()
@dataclass
class MeetingInfo:
    """Full record of one meeting: attendees, agenda steps, discussions."""
    meeting_id: str
    title: str
    date: str  # YYYY-MM-DD
    attendees: List[str] = field(default_factory=list)
    steps: List[ProgressStep] = field(default_factory=list)
    discussions: List[DiscussionEntry] = field(default_factory=list)
    status: str = "in_progress"  # in_progress, completed
    created_at: str = ""
    ended_at: str = ""
    consensus: str = ""  # final consensus text
    def __post_init__(self):
        # Fill in creation time / date when the caller omitted them.
        if not self.created_at:
            self.created_at = datetime.now().isoformat()
        if not self.date:
            self.date = datetime.now().strftime("%Y-%m-%d")
    @property
    def current_step(self) -> Optional[ProgressStep]:
        """The step currently marked active, if any."""
        return next((s for s in self.steps if s.status == "active"), None)
    @property
    def completed_steps(self) -> List[ProgressStep]:
        """All steps already finished."""
        return [s for s in self.steps if s.status == "completed"]
    @property
    def progress_summary(self) -> str:
        """Short "done/total" summary, flagging an in-progress step."""
        total = len(self.steps)
        if total == 0:
            return "0/0"
        summary = f"{len(self.completed_steps)}/{total}"
        return summary + (" (active)" if self.current_step else "")
class MeetingRecorder:
    """
    Meeting recording service.

    Records a meeting's discussions, progress and consensus.  Each
    meeting is persisted twice under meetings/<date>/: a JSON sidecar
    (machine-readable source of truth) and a rendered Markdown file.
    """
    def __init__(self):
        self._storage = get_storage()  # file-backed persistence service
        self._lock = asyncio.Lock()    # serializes read-modify-write cycles
    def _parse_meeting_data(self, data: dict) -> MeetingInfo:
        """Convert a raw dict (as loaded from JSON) back into MeetingInfo."""
        # Rehydrate steps
        steps = [
            ProgressStep(**s) if isinstance(s, dict) else s
            for s in data.get("steps", [])
        ]
        # Rehydrate discussions
        discussions = [
            DiscussionEntry(**d) if isinstance(d, dict) else d
            for d in data.get("discussions", [])
        ]
        # Build the MeetingInfo (note: mutates `data` in place)
        data["steps"] = steps
        data["discussions"] = discussions
        return MeetingInfo(**data)
    def _get_meeting_dir(self, date: str) -> str:
        """Storage-relative directory for a given date."""
        return f"meetings/{date}"
    def _get_meeting_file(self, meeting_id: str, date: str) -> str:
        """Storage-relative Markdown path for a meeting."""
        return f"{self._get_meeting_dir(date)}/{meeting_id}.md"
    async def create_meeting(
        self,
        meeting_id: str,
        title: str,
        attendees: List[str],
        steps: List[str] = None
    ) -> MeetingInfo:
        """
        Create a new meeting record.
        Args:
            meeting_id: meeting ID
            title: meeting title
            attendees: list of attendee names/IDs
            steps: agenda step labels
        Returns:
            The created meeting info.
        """
        async with self._lock:
            date = datetime.now().strftime("%Y-%m-%d")
            # Build the agenda steps, all starting as "pending"
            progress_steps = []
            if steps:
                for i, step_label in enumerate(steps):
                    progress_steps.append(ProgressStep(
                        step_id=f"step_{i+1}",
                        label=step_label,
                        status="pending"
                    ))
            meeting = MeetingInfo(
                meeting_id=meeting_id,
                title=title,
                date=date,
                attendees=attendees,
                steps=progress_steps
            )
            # Persist (JSON + Markdown)
            await self._save_meeting_markdown(meeting)
            return meeting
    async def _save_meeting_markdown(self, meeting: MeetingInfo) -> None:
        """Render the meeting to Markdown and persist both JSON and Markdown."""
        lines = [
            f"# {meeting.title}",
            "",
            f"**会议 ID**: {meeting.meeting_id}",
            f"**日期**: {meeting.date}",
            f"**状态**: {meeting.status}",
            f"**参会者**: {', '.join(meeting.attendees)}",
            "",
            "## 会议进度",
            "",
        ]
        # Agenda steps with a status icon per step
        for step in meeting.steps:
            status_icon = {
                "pending": "",
                "active": "",
                "completed": ""
            }.get(step.status, "")
            time_str = f" ({step.completed_at})" if step.completed_at else ""
            lines.append(f"- {status_icon} **{step.label}**{time_str}")
        lines.append("")
        lines.append("## 讨论记录")
        lines.append("")
        # Discussion entries, one section per utterance
        for discussion in meeting.discussions:
            lines.append(f"### {discussion.agent_name} - {discussion.timestamp[:19]}")
            if discussion.step:
                lines.append(f"*步骤: {discussion.step}*")
            lines.append("")
            lines.append(discussion.content)
            lines.append("")
        # Final consensus, when present
        if meeting.consensus:
            lines.append("## 共识")
            lines.append("")
            lines.append(meeting.consensus)
            lines.append("")
        # Metadata footer
        lines.append("---")
        lines.append("")
        lines.append(f"**创建时间**: {meeting.created_at}")
        if meeting.ended_at:
            lines.append(f"**结束时间**: {meeting.ended_at}")
        content = "\n".join(lines)
        file_path = self._get_meeting_file(meeting.meeting_id, meeting.date)
        # Persist JSON via the storage service
        await self._storage.ensure_dir(self._get_meeting_dir(meeting.date))
        await self._storage.write_json(file_path.replace(".md", ".json"), asdict(meeting))
        # Write the Markdown rendering next to it
        import aiofiles
        full_path = Path(self._storage.base_path) / file_path
        async with aiofiles.open(full_path, mode="w", encoding="utf-8") as f:
            await f.write(content)
    async def add_discussion(
        self,
        meeting_id: str,
        agent_id: str,
        agent_name: str,
        content: str,
        step: str = ""
    ) -> None:
        """
        Append a discussion entry to a meeting.
        Args:
            meeting_id: meeting ID
            agent_id: speaking agent's ID
            agent_name: speaking agent's display name
            content: discussion text
            step: current agenda step label
        NOTE(review): the meeting is looked up under *today's* date, so
        entries cannot be added to a meeting created on a previous day —
        confirm this is intended.
        """
        async with self._lock:
            # Load the meeting record for today
            date = datetime.now().strftime("%Y-%m-%d")
            file_path = self._get_meeting_file(meeting_id, date)
            json_path = file_path.replace(".md", ".json")
            data = await self._storage.read_json(json_path)
            if not data:
                return  # meeting does not exist
            meeting = self._parse_meeting_data(data)
            meeting.discussions.append(DiscussionEntry(
                agent_id=agent_id,
                agent_name=agent_name,
                content=content,
                step=step
            ))
            # Persist the updated record
            await self._save_meeting_markdown(meeting)
    async def update_progress(
        self,
        meeting_id: str,
        step_label: str
    ) -> None:
        """
        Advance the meeting's progress.
        Args:
            meeting_id: meeting ID
            step_label: label of the step to activate
        Marks the previously active step as completed; if the label is
        unknown, activates the first still-pending step instead.
        """
        async with self._lock:
            date = datetime.now().strftime("%Y-%m-%d")
            json_path = self._get_meeting_file(meeting_id, date).replace(".md", ".json")
            data = await self._storage.read_json(json_path)
            if not data:
                return
            meeting = self._parse_meeting_data(data)
            # Look for the requested step and activate it
            step_found = False
            for step in meeting.steps:
                if step.label == step_label:
                    # Close out the previously active step first
                    if meeting.current_step:
                        meeting.current_step.mark_completed()
                    step.mark_active()
                    step_found = True
                    break
            if not step_found and meeting.steps:
                # Fallback: activate the first pending step
                for step in meeting.steps:
                    if step.status == "pending":
                        if meeting.current_step:
                            meeting.current_step.mark_completed()
                        step.mark_active()
                        break
            await self._save_meeting_markdown(meeting)
    async def get_meeting(self, meeting_id: str, date: str = None) -> Optional[MeetingInfo]:
        """
        Fetch a meeting record.
        Args:
            meeting_id: meeting ID
            date: date (YYYY-MM-DD); defaults to today
        Returns:
            The meeting info, or None if not found.
        """
        if date is None:
            date = datetime.now().strftime("%Y-%m-%d")
        json_path = self._get_meeting_file(meeting_id, date).replace(".md", ".json")
        data = await self._storage.read_json(json_path)
        if data:
            return self._parse_meeting_data(data)
        return None
    async def list_meetings(self, date: str = None) -> List[MeetingInfo]:
        """
        List all meetings for a given date.
        Args:
            date: date (YYYY-MM-DD); defaults to today
        Returns:
            Meetings sorted by creation time.
        """
        if date is None:
            date = datetime.now().strftime("%Y-%m-%d")
        meetings_dir = Path(self._storage.base_path) / self._get_meeting_dir(date)
        if not meetings_dir.exists():
            return []
        meetings = []
        for json_file in meetings_dir.glob("*.json"):
            data = await self._storage.read_json(f"meetings/{date}/{json_file.name}")
            if data:
                meetings.append(self._parse_meeting_data(data))
        return sorted(meetings, key=lambda m: m.created_at)
    async def end_meeting(self, meeting_id: str, consensus: str = "") -> bool:
        """
        End a meeting (today's date only).
        Args:
            meeting_id: meeting ID
            consensus: final consensus text
        Returns:
            True on success, False if the meeting was not found.
        """
        async with self._lock:
            date = datetime.now().strftime("%Y-%m-%d")
            json_path = self._get_meeting_file(meeting_id, date).replace(".md", ".json")
            data = await self._storage.read_json(json_path)
            if not data:
                return False
            meeting = self._parse_meeting_data(data)
            meeting.status = "completed"
            meeting.ended_at = datetime.now().isoformat()
            if consensus:
                meeting.consensus = consensus
            # Close out whatever step was still active
            if meeting.current_step:
                meeting.current_step.mark_completed()
            await self._save_meeting_markdown(meeting)
            return True
# Global singleton instance
_recorder_instance: Optional[MeetingRecorder] = None
def get_meeting_recorder() -> MeetingRecorder:
    """Return the shared MeetingRecorder, creating it lazily."""
    global _recorder_instance
    if _recorder_instance is not None:
        return _recorder_instance
    _recorder_instance = MeetingRecorder()
    return _recorder_instance

View File

@@ -0,0 +1,172 @@
"""
会议调度器 - 实现栅栏同步Barrier Synchronization
"""
import asyncio
from datetime import datetime
from typing import Dict, List, Optional, Set
from dataclasses import dataclass, asdict
from .storage import get_storage
@dataclass
class MeetingQueue:
    """Barrier state for one meeting: who is expected and who has arrived."""
    meeting_id: str
    title: str
    expected_attendees: List[str]
    arrived_attendees: List[str]
    status: str = "waiting"
    created_at: str = ""
    started_at: str = ""
    min_required: int = 2
    def __post_init__(self):
        self.created_at = self.created_at or datetime.now().isoformat()
    @property
    def is_ready(self) -> bool:
        """True once every expected attendee arrived and quorum is met."""
        arrived = set(self.arrived_attendees)
        return set(self.expected_attendees) <= arrived and len(arrived) >= self.min_required
    @property
    def missing_attendees(self) -> List[str]:
        """Expected attendees that have not yet arrived (unordered)."""
        return list(set(self.expected_attendees) - set(self.arrived_attendees))
    @property
    def progress(self) -> str:
        """Arrival progress as "arrived/expected"."""
        return f"{len(self.arrived_attendees)}/{len(self.expected_attendees)}"
class MeetingScheduler:
    """Meeting scheduler implementing barrier synchronization.

    Agents call wait_for_meeting() and block until every expected
    attendee has arrived (or the wait times out).

    Fix: the original wait_for_meeting() called create_meeting() and
    _start_meeting() while already holding ``self._lock``;
    asyncio.Lock is NOT reentrant, so both paths deadlocked.  Queue
    mutation now happens inline under a single lock acquisition, and
    the event is awaited only after the lock is released.
    """
    QUEUES_FILE = "cache/meeting_queue.json"
    def __init__(self):
        self._storage = get_storage()  # JSON-file backed persistence
        self._lock = asyncio.Lock()    # guards queue-file read-modify-write
        self._events: Dict[str, asyncio.Event] = {}  # per-meeting wakeup events
    async def _load_queues(self) -> Dict[str, Dict]:
        """Read all meeting queues from storage."""
        return await self._storage.read_json(self.QUEUES_FILE)
    async def _save_queues(self, queues: Dict[str, Dict]) -> None:
        """Persist all meeting queues to storage."""
        await self._storage.write_json(self.QUEUES_FILE, queues)
    async def _create_meeting_locked(
        self,
        meeting_id: str,
        title: str,
        expected_attendees: List[str],
        min_required: int
    ) -> MeetingQueue:
        """Create and persist a queue; caller must already hold self._lock."""
        queue = MeetingQueue(
            meeting_id=meeting_id,
            title=title,
            expected_attendees=expected_attendees,
            arrived_attendees=[],
            min_required=min_required or len(expected_attendees)
        )
        queues = await self._load_queues()
        queues[meeting_id] = asdict(queue)
        await self._save_queues(queues)
        return queue
    async def create_meeting(
        self,
        meeting_id: str,
        title: str,
        expected_attendees: List[str],
        min_required: int = None
    ) -> MeetingQueue:
        """Create a new waiting queue for a meeting.

        min_required defaults to the number of expected attendees.
        """
        async with self._lock:
            return await self._create_meeting_locked(
                meeting_id, title, expected_attendees, min_required
            )
    async def get_queue(self, meeting_id: str) -> Optional[MeetingQueue]:
        """Return the queue for meeting_id, or None if unknown."""
        queues = await self._load_queues()
        return MeetingQueue(**queues[meeting_id]) if meeting_id in queues else None
    async def wait_for_meeting(
        self,
        agent_id: str,
        meeting_id: str,
        timeout: int = 300
    ) -> str:
        """Register agent arrival and block until the meeting starts.

        Returns "started" when the barrier opens, "timeout" otherwise.
        An unknown meeting_id creates a single-member meeting that
        starts immediately (matches the previous behavior).
        """
        async with self._lock:
            queues = await self._load_queues()
            if meeting_id not in queues:
                # First arrival for an unknown meeting: open a one-member
                # meeting inline (do NOT call create_meeting() here — it
                # re-acquires the non-reentrant lock and would deadlock).
                await self._create_meeting_locked(
                    meeting_id, f"Meeting: {meeting_id}", [agent_id], 1
                )
                return "started"
            queue_data = queues[meeting_id]
            if agent_id not in queue_data.get("arrived_attendees", []):
                queue_data["arrived_attendees"].append(agent_id)
                queue_data["arrived_attendees"].sort()
                await self._save_queues(queues)
            queue = MeetingQueue(**queue_data)
            if queue.is_ready:
                # Barrier satisfied: mark ready and wake waiters inline
                # (_start_meeting() would re-acquire the lock).
                queue_data["status"] = "ready"
                queue_data["started_at"] = datetime.now().isoformat()
                await self._save_queues(queues)
                event = self._events.get(meeting_id)
                if event:
                    event.set()
                return "started"
            # Register the wakeup event while still holding the lock.
            event = self._events.setdefault(meeting_id, asyncio.Event())
        # Wait OUTSIDE the lock so other agents can arrive.
        try:
            await asyncio.wait_for(event.wait(), timeout=timeout)
            return "started"
        except asyncio.TimeoutError:
            return "timeout"
    async def _start_meeting(self, meeting_id: str) -> None:
        """Mark a meeting ready and wake every waiting agent."""
        async with self._lock:
            queues = await self._load_queues()
            if meeting_id in queues:
                queues[meeting_id]["status"] = "ready"
                queues[meeting_id]["started_at"] = datetime.now().isoformat()
                await self._save_queues(queues)
            # Wake all waiters
            event = self._events.get(meeting_id)
            if event:
                event.set()
    async def end_meeting(self, meeting_id: str) -> bool:
        """Mark a meeting as ended and drop its wakeup event."""
        async with self._lock:
            queues = await self._load_queues()
            if meeting_id not in queues:
                return False
            queues[meeting_id]["status"] = "ended"
            await self._save_queues(queues)
            self._events.pop(meeting_id, None)
            return True
    async def get_all_queues(self) -> List[MeetingQueue]:
        """Return every known meeting queue."""
        queues = await self._load_queues()
        return [MeetingQueue(**data) for data in queues.values()]
    async def add_attendee(self, meeting_id: str, agent_id: str) -> bool:
        """Add agent_id to the meeting's expected-attendee list.

        Returns False when the meeting is unknown or the agent is
        already expected.
        """
        async with self._lock:
            queues = await self._load_queues()
            if meeting_id not in queues:
                return False
            if agent_id not in queues[meeting_id]["expected_attendees"]:
                queues[meeting_id]["expected_attendees"].append(agent_id)
                await self._save_queues(queues)
                return True
            return False
# Simple module-level singleton
_scheduler_instance: Optional[MeetingScheduler] = None
def get_meeting_scheduler() -> MeetingScheduler:
    """Return the shared MeetingScheduler, creating it lazily."""
    global _scheduler_instance
    if _scheduler_instance is not None:
        return _scheduler_instance
    _scheduler_instance = MeetingScheduler()
    return _scheduler_instance

View File

@@ -0,0 +1,436 @@
"""
Agent 进程管理器
负责启动、停止和监控 Agent 进程/任务。
支持两种类型的 Agent:
1. NativeLLMAgent - 异步任务,无需外部进程
2. ProcessWrapperAgent - 外部 CLI 工具,需要进程管理
"""
import asyncio
import logging
import signal
import subprocess
import psutil
from typing import Dict, List, Optional, Any
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from ..adapters.native_llm_agent import NativeLLMAgent, NativeLLMAgentFactory
from .heartbeat import get_heartbeat_service
from .agent_registry import get_agent_registry
logger = logging.getLogger(__name__)
class AgentStatus(Enum):
    """Lifecycle states of a managed Agent."""
    STOPPED = "stopped"    # not running (cleanly stopped or never started)
    STARTING = "starting"  # start_agent() in progress
    RUNNING = "running"    # alive and being monitored
    STOPPING = "stopping"  # stop_agent() in progress
    CRASHED = "crashed"    # startup/shutdown failed or process died
    UNKNOWN = "unknown"    # agent not managed by this ProcessManager
@dataclass
class AgentProcess:
    """Bookkeeping record for one managed Agent."""
    agent_id: str
    agent_type: str  # native_llm, process_wrapper
    status: AgentStatus = AgentStatus.STOPPED
    process: Optional[subprocess.Popen] = None  # set for process_wrapper agents
    task: Optional[asyncio.Task] = None  # background monitor/heartbeat task
    agent: Optional[NativeLLMAgent] = None  # set for native_llm agents
    started_at: Optional[datetime] = None
    stopped_at: Optional[datetime] = None
    restart_count: int = 0
    config: Dict[str, Any] = field(default_factory=dict)
    @property
    def uptime(self) -> Optional[float]:
        """Running time in seconds, or None if never started."""
        if self.started_at:
            end = self.stopped_at or datetime.now()
            return (end - self.started_at).total_seconds()
        return None
    @property
    def is_alive(self) -> bool:
        """Whether the agent is currently alive.

        Fix: always returns a real bool — the original's chained `and`
        expressions could leak None (missing task/process) or a Popen
        object to callers expecting a boolean.
        """
        if self.agent_type == "native_llm":
            return (
                self.status == AgentStatus.RUNNING
                and self.task is not None
                and not self.task.done()
            )
        return self.process is not None and self.process.poll() is None
class ProcessManager:
"""
Agent 进程管理器
功能:
1. 启动/停止 Agent
2. 监控 Agent 健康状态
3. 自动重启崩溃的 Agent
4. 管理 Agent 生命周期
"""
    def __init__(self):
        # Managed agent records keyed by agent_id.
        self.processes: Dict[str, AgentProcess] = {}
        self.heartbeat_service = get_heartbeat_service()
        self.registry = get_agent_registry()
        # Background health-monitor task; started lazily by start_agent().
        self._monitor_task: Optional[asyncio.Task] = None
        # presumably flipped by start_monitor()/shutdown — defined elsewhere; TODO confirm
        self._running = False
    async def start_agent(
        self,
        agent_id: str,
        agent_type: str = "native_llm",
        config: Dict[str, Any] = None
    ) -> bool:
        """
        Start an Agent.
        Args:
            agent_id: unique Agent identifier
            agent_type: agent kind ("native_llm" or "process_wrapper")
            config: agent configuration dict
        Returns:
            True when the agent was started successfully; False when it
            is already running, the type is unknown, or startup failed.
        """
        if agent_id in self.processes and self.processes[agent_id].is_alive:
            logger.warning(f"Agent {agent_id} 已在运行")
            return False
        logger.info(f"启动 Agent: {agent_id} (类型: {agent_type})")
        process_info = AgentProcess(
            agent_id=agent_id,
            agent_type=agent_type,
            status=AgentStatus.STARTING,
            config=config or {}
        )
        try:
            if agent_type == "native_llm":
                success = await self._start_native_agent(process_info)
            elif agent_type == "process_wrapper":
                success = await self._start_process_wrapper(process_info)
            else:
                logger.error(f"不支持的 Agent 类型: {agent_type}")
                return False
            if success:
                process_info.status = AgentStatus.RUNNING
                process_info.started_at = datetime.now()
                self.processes[agent_id] = process_info
                # Lazily start the shared health-monitor on first agent.
                if not self._running:
                    await self.start_monitor()
                return True
            return False
        except Exception as e:
            logger.error(f"启动 Agent 失败: {agent_id}: {e}", exc_info=True)
            process_info.status = AgentStatus.CRASHED
            return False
    async def _start_native_agent(self, process_info: AgentProcess) -> bool:
        """Start a native LLM Agent as an in-process asyncio task.

        Creates the agent via the factory, then spawns a background
        loop that health-checks and heartbeats every 30 seconds until
        the check fails or the task is cancelled.
        """
        try:
            # Instantiate the agent
            agent = await NativeLLMAgentFactory.create(
                agent_id=process_info.agent_id,
                name=process_info.config.get("name"),
                role=process_info.config.get("role", "developer"),
                model=process_info.config.get("model", "claude-sonnet-4.6"),
                config=process_info.config
            )
            process_info.agent = agent
            # Background task keeping the agent alive / heartbeating
            async def agent_loop():
                try:
                    # Periodic heartbeat with health check
                    while True:
                        await asyncio.sleep(30)
                        if await agent.health_check():
                            await agent.update_heartbeat("idle")
                        else:
                            # Stop heartbeating once health checks fail
                            logger.warning(f"Agent {process_info.agent_id} 健康检查失败")
                            break
                except asyncio.CancelledError:
                    logger.info(f"Agent {process_info.agent_id} 任务被取消")
                except Exception as e:
                    logger.error(f"Agent {process_info.agent_id} 循环出错: {e}")
            task = asyncio.create_task(agent_loop())
            process_info.task = task
            logger.info(f"原生 LLM Agent 启动成功: {process_info.agent_id}")
            return True
        except Exception as e:
            logger.error(f"启动原生 Agent 失败: {e}")
            return False
async def _start_process_wrapper(self, process_info: AgentProcess) -> bool:
"""启动进程包装 Agent"""
command = process_info.config.get("command")
args = process_info.config.get("args", [])
if not command:
logger.error("进程包装 Agent 需要指定 command")
return False
try:
# 启动子进程
proc = subprocess.Popen(
[command] + args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True
)
process_info.process = proc
# 创建监控任务
async def process_monitor():
try:
while True:
if proc.poll() is not None:
logger.warning(f"进程 {process_info.agent_id} 已退出: {proc.returncode}")
break
await asyncio.sleep(5)
except asyncio.CancelledError:
# 尝试优雅关闭
try:
proc.terminate()
proc.wait(timeout=5)
except:
proc.kill()
task = asyncio.create_task(process_monitor())
process_info.task = task
logger.info(f"进程包装 Agent 启动成功: {process_info.agent_id} (PID: {proc.pid})")
return True
except Exception as e:
logger.error(f"启动进程失败: {e}")
return False
async def stop_agent(self, agent_id: str, graceful: bool = True) -> bool:
    """
    Stop an agent and remove it from the process table.

    Shutdown order: native agent first, then the background task, then the
    wrapped external process (terminate with a 5s grace period, then kill).

    Args:
        agent_id: Agent ID
        graceful: whether to attempt a graceful shutdown of an external
            process before killing it.

    Returns:
        True if the agent was stopped and removed; False when it was not
        running or the shutdown raised.
    """
    if agent_id not in self.processes:
        logger.warning(f"Agent {agent_id} 未运行")
        return False
    process_info = self.processes[agent_id]
    logger.info(f"停止 Agent: {agent_id} (优雅: {graceful})")
    process_info.status = AgentStatus.STOPPING
    try:
        if process_info.agent:
            # Shut down the in-process (native LLM) agent.
            await process_info.agent.shutdown()
        if process_info.task:
            # Cancel the background keep-alive/monitor task and wait for it.
            process_info.task.cancel()
            try:
                await process_info.task
            except asyncio.CancelledError:
                pass
        if process_info.process:
            # Terminate the wrapped external process.
            if graceful:
                process_info.process.terminate()
                try:
                    process_info.process.wait(timeout=5)
                except subprocess.TimeoutExpired:
                    process_info.process.kill()
            else:
                process_info.process.kill()
        process_info.status = AgentStatus.STOPPED
        process_info.stopped_at = datetime.now()
        # Remove from the process table.
        del self.processes[agent_id]
        return True
    except Exception as e:
        # On failure the entry stays in self.processes, marked CRASHED.
        logger.error(f"停止 Agent 失败: {agent_id}: {e}")
        process_info.status = AgentStatus.CRASHED
        return False
async def restart_agent(self, agent_id: str) -> bool:
    """Stop a running agent and start it again with its previous config."""
    logger.info(f"重启 Agent: {agent_id}")
    entry = self.processes.get(agent_id)
    if entry is None:
        return False
    # Keep a reference: stop_agent removes the entry from self.processes.
    saved_type = entry.agent_type
    saved_config = entry.config
    await self.stop_agent(agent_id)
    entry.restart_count += 1
    return await self.start_agent(agent_id, saved_type, saved_config)
def get_agent_status(self, agent_id: str) -> AgentStatus:
    """
    Return the current status of an agent.

    Note: the previous annotation was Optional[AgentStatus], but this
    method never returns None — unknown agents map to AgentStatus.UNKNOWN.
    The annotation is narrowed accordingly (backward compatible).
    """
    entry = self.processes.get(agent_id)
    return entry.status if entry is not None else AgentStatus.UNKNOWN
def get_all_agents(self) -> Dict[str, AgentProcess]:
    """Return a shallow snapshot of every tracked agent process."""
    return dict(self.processes)
def get_running_agents(self) -> List[str]:
    """Return the IDs of all agents whose process/task is still alive."""
    alive_ids = []
    for agent_id, entry in self.processes.items():
        if entry.is_alive:
            alive_ids.append(agent_id)
    return alive_ids
async def monitor_agent_health(self) -> Dict[str, bool]:
    """
    Check the health of every managed agent.

    Native LLM agents are asked directly via their async health_check();
    for wrapped processes, liveness of the process/task is used as a proxy.

    Returns:
        Mapping of {agent_id: is_healthy}.
    """
    results = {}
    for agent_id, process_info in self.processes.items():
        if process_info.agent_type == "native_llm" and process_info.agent:
            # Native agent: delegate to its own health check.
            results[agent_id] = await process_info.agent.health_check()
        else:
            # Wrapped process: alive == healthy.
            results[agent_id] = process_info.is_alive
    return results
async def start_monitor(self, interval: int = 30):
    """
    Start the background health-monitoring loop (idempotent).

    Args:
        interval: seconds between health sweeps.
    """
    if self._running:
        # Already running; do not spawn a second loop.
        return
    self._running = True
    async def monitor_loop():
        # Runs until stop_monitor() flips _running or cancels the task.
        while self._running:
            try:
                await self._check_agents()
                await asyncio.sleep(interval)
            except asyncio.CancelledError:
                break
            except Exception as e:
                # Keep the loop alive on unexpected errors: log and retry
                # after the normal interval.
                logger.error(f"监控循环出错: {e}", exc_info=True)
                await asyncio.sleep(interval)
    self._monitor_task = asyncio.create_task(monitor_loop())
    logger.info("监控任务已启动")
async def stop_monitor(self):
    """Stop the background monitoring loop and wait for it to finish."""
    self._running = False
    task = self._monitor_task
    if task is not None:
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass
    logger.info("监控任务已停止")
async def _check_agents(self):
    """
    Run one health sweep and auto-restart unhealthy agents when configured.

    An agent is restarted only if its config sets auto_restart=True and its
    restart_count is still below max_restarts (default 3); otherwise it is
    marked CRASHED.
    """
    health_results = await self.monitor_agent_health()
    for agent_id, is_healthy in health_results.items():
        if not is_healthy:
            logger.warning(f"Agent {agent_id} 不健康")
            # Auto-restart only when the agent's config opts in.
            process_info = self.processes.get(agent_id)
            if process_info and process_info.config.get("auto_restart", False):
                if process_info.restart_count < process_info.config.get("max_restarts", 3):
                    logger.info(f"自动重启 Agent: {agent_id}")
                    await self.restart_agent(agent_id)
                else:
                    logger.error(f"Agent {agent_id} 重启次数超限,标记为崩溃")
                    process_info.status = AgentStatus.CRASHED
async def shutdown_all(self):
    """Stop every managed agent, then stop the health monitor."""
    logger.info("关闭所有 Agent...")
    # Snapshot the keys first: stop_agent mutates self.processes.
    for agent_id in tuple(self.processes):
        await self.stop_agent(agent_id)
    await self.stop_monitor()
    logger.info("所有 Agent 已关闭")
def get_summary(self) -> Dict[str, Any]:
    """Build a snapshot: totals, running IDs and per-status counts."""
    running_ids = self.get_running_agents()
    by_status: Dict[str, int] = {}
    for entry in self.processes.values():
        key = entry.status.value
        by_status[key] = by_status.get(key, 0) + 1
    return {
        "total_agents": len(self.processes),
        "running_agents": len(running_ids),
        "running_agent_ids": running_ids,
        "status_counts": by_status,
        "monitor_running": self._running
    }
# Module-level singleton plumbing for the process manager.
_process_manager: Optional[ProcessManager] = None


def get_process_manager() -> ProcessManager:
    """Return the shared ProcessManager, creating it on first use."""
    global _process_manager
    if _process_manager is not None:
        return _process_manager
    _process_manager = ProcessManager()
    return _process_manager


def reset_process_manager():
    """Drop the cached singleton so the next call builds a fresh one (for tests)."""
    global _process_manager
    _process_manager = None

View File

@@ -0,0 +1,232 @@
"""
资源管理器 - 整合文件锁和心跳服务
提供声明式的任务执行接口,自动管理资源获取和释放
"""
import asyncio
import re
from typing import List, Dict, Optional
from dataclasses import dataclass
from .storage import get_storage
from .file_lock import get_file_lock_service
from .heartbeat import get_heartbeat_service
from .agent_registry import get_agent_registry
@dataclass
class TaskResult:
    """Outcome of one declarative task execution."""
    success: bool       # whether the task completed without error
    message: str        # human-readable result description
    # Optional on purpose: None is normalized to [] in __post_init__,
    # avoiding the shared-mutable-default pitfall.
    files_locked: Optional[List[str]] = None
    duration_seconds: float = 0.0
    def __post_init__(self):
        # Normalize the default so callers always see a list.
        if self.files_locked is None:
            self.files_locked = []
class ResourceManager:
    """
    Resource manager.

    Combines the file-lock and heartbeat services behind a declarative
    task-execution interface:
    - parses file paths out of the task description
    - acquires a lock for every referenced file
    - keeps the agent's heartbeat updated while the task runs
    - releases all acquired resources when the task finishes
    """

    # Regex patterns for spotting file paths in free-form task text.
    # The alternations MUST be non-capturing `(?:...)`: with a capturing
    # group, re.findall() returns only the captured extension (e.g. "py")
    # instead of the whole matched path — which silently broke extraction.
    FILE_PATTERNS = [
        r'[\w/]+\.(?:py|js|ts|tsx|jsx|java|go|rs|c|cpp|h|hpp|cs|swift|kt|rb|php|sh|bash|zsh|yaml|yml|json|xml|html|css|scss|md|txt|sql)',
        r'[\w/]+/(?:src|lib|app|components|services|utils|tests|test|spec|config|assets|static|views|controllers|models|routes)/[\w./]+',
    ]

    def __init__(self):
        self._lock_service = get_file_lock_service()
        self._heartbeat_service = get_heartbeat_service()
        self._agent_registry = get_agent_registry()

    def _extract_files_from_task(self, task_description: str) -> List[str]:
        """
        Extract file paths from a task description.

        Args:
            task_description: free-form task text.

        Returns:
            De-duplicated, normalized paths in order of first occurrence;
            matches of length <= 3 are treated as noise and dropped.
        """
        candidates: List[str] = []
        for pattern in self.FILE_PATTERNS:
            candidates.extend(re.findall(pattern, task_description))
        # De-duplicate while preserving order; normalize separators.
        seen = set()
        result = []
        for raw in candidates:
            normalized = raw.strip().replace('\\', '/')
            if normalized and normalized not in seen and len(normalized) > 3:
                seen.add(normalized)
                result.append(normalized)
        return result

    async def execute_task(
        self,
        agent_id: str,
        task_description: str,
        timeout: int = 300
    ) -> TaskResult:
        """
        Execute a task (declarative interface).

        Flow:
        1. parse the files the task mentions
        2. acquire file locks — best effort: files whose lock cannot be
           obtained are skipped rather than failing the task (TODO confirm
           this is the intended contract)
        3. set the heartbeat to "working"
        4. run the task (currently simulated; the real implementation will
           dispatch through a CLIPluginAdapter)
        5. finally: release every acquired lock and report "idle"

        Args:
            agent_id: agent executing the task.
            task_description: task text.
            timeout: timeout in seconds (currently unused by the simulation).

        Returns:
            TaskResult describing the run.
        """
        import time
        start_time = time.time()
        # 1. Parse referenced files.
        files = self._extract_files_from_task(task_description)
        # 2. Acquire file locks, keeping only those actually obtained.
        acquired_files = []
        for file_path in files:
            success = await self._lock_service.acquire_lock(
                file_path, agent_id, agent_id[:3].upper()
            )
            if success:
                acquired_files.append(file_path)
        try:
            # 3. Mark the agent as working on this task.
            await self._heartbeat_service.update_heartbeat(
                agent_id,
                status="working",
                current_task=task_description,
                progress=0
            )
            # 4. Execute (simulated).
            await asyncio.sleep(0.1)
            duration = time.time() - start_time
            return TaskResult(
                success=True,
                message=f"Task executed: {task_description}",
                files_locked=acquired_files,
                duration_seconds=duration
            )
        finally:
            # 5. Release every lock we acquired, then report idle.
            # Runs even on failure, so progress=100/idle is reported in
            # both cases — presumably intended; verify with callers.
            for file_path in acquired_files:
                await self._lock_service.release_lock(file_path, agent_id)
            await self._heartbeat_service.update_heartbeat(
                agent_id,
                status="idle",
                current_task="",
                progress=100
            )

    async def parse_task_files(self, task_description: str) -> List[str]:
        """
        Parse the file paths referenced by a task description.

        Args:
            task_description: task text.

        Returns:
            List of file paths.
        """
        return self._extract_files_from_task(task_description)

    async def get_agent_status(self, agent_id: str) -> Dict:
        """
        Aggregate one agent's status (registry info, heartbeat, held locks,
        runtime state) into a single dict.

        Args:
            agent_id: Agent ID.

        Returns:
            Combined status information.
        """
        # Heartbeat info.
        heartbeat = await self._heartbeat_service.get_heartbeat(agent_id)
        # Locks currently held by this agent.
        locks = await self._lock_service.get_agent_locks(agent_id)
        # Registration info.
        agent_info = await self._agent_registry.get_agent(agent_id)
        # Runtime state.
        agent_state = await self._agent_registry.get_state(agent_id)
        return {
            "agent_id": agent_id,
            "info": {
                "name": agent_info.name if agent_info else "",
                "role": agent_info.role if agent_info else "",
                "model": agent_info.model if agent_info else "",
            },
            "heartbeat": {
                "status": heartbeat.status if heartbeat else "unknown",
                "current_task": heartbeat.current_task if heartbeat else "",
                "progress": heartbeat.progress if heartbeat else 0,
                "elapsed": heartbeat.elapsed_display if heartbeat else "",
            },
            "locks": [
                {"file": lock.file_path, "elapsed": lock.elapsed_display}
                for lock in locks
            ],
            "state": {
                "task": agent_state.current_task if agent_state else "",
                "progress": agent_state.progress if agent_state else 0,
                "working_files": agent_state.working_files if agent_state else [],
            }
        }

    async def get_all_status(self) -> List[Dict]:
        """
        Aggregate status for every registered agent.

        Returns:
            List of status dicts, one per agent.
        """
        agents = await self._agent_registry.list_agents()
        statuses = []
        for agent in agents:
            statuses.append(await self.get_agent_status(agent.agent_id))
        return statuses
# Global singleton holder for the resource manager.
_manager_instance: Optional[ResourceManager] = None


def get_resource_manager() -> ResourceManager:
    """Return the shared ResourceManager, creating it on first use."""
    global _manager_instance
    if _manager_instance is not None:
        return _manager_instance
    _manager_instance = ResourceManager()
    return _manager_instance

View File

@@ -0,0 +1,199 @@
"""
角色分配器 - AI 驱动的角色分配
分析任务描述,自动为 Agent 分配最适合的角色
"""
import asyncio
from typing import Dict, List, Optional
from dataclasses import dataclass
from enum import Enum
from .agent_registry import AgentRegistry, AgentInfo
class AgentRole(str, Enum):
    """Closed set of roles an agent can be assigned.

    Subclasses str so members compare equal to their plain-string values
    (e.g. AgentRole.QA == "qa") and serialize cleanly to JSON.
    """
    ARCHITECT = "architect"
    PRODUCT_MANAGER = "pm"
    DEVELOPER = "developer"
    QA = "qa"
    REVIEWER = "reviewer"
@dataclass
class RoleWeight:
    """Keyword-driven scoring configuration for a single agent role."""
    role: str             # role name this config scores
    weight: float         # multiplier applied to the keyword hit count
    keywords: List[str]   # substrings that indicate this role is relevant

    def matches(self, text: str) -> int:
        """Count how many keywords occur in *text* (case-insensitive)."""
        haystack = text.lower()
        return sum(1 for keyword in self.keywords if keyword.lower() in haystack)
class RoleAllocator:
    """
    Role allocator.

    Analyzes a task description and assigns the most suitable role to each
    agent, using keyword-hit scoring weighted per role.
    """

    # Role weight configuration (from design-spec.md).
    ROLE_WEIGHTS = {
        "pm": RoleWeight("pm", 1.5, ["需求", "产品", "规划", "用户", "功能", "priority", "requirement", "product"]),
        "architect": RoleWeight("architect", 1.5, ["架构", "设计", "方案", "技术", "系统", "design", "architecture"]),
        "developer": RoleWeight("developer", 1.0, ["开发", "实现", "编码", "代码", "function", "implement", "code"]),
        "reviewer": RoleWeight("reviewer", 1.3, ["审查", "review", "检查", "验证", "校对", "check"]),
        "qa": RoleWeight("qa", 1.2, ["测试", "test", "质量", "bug", "验证", "quality"]),
    }

    def __init__(self):
        pass

    def _analyze_task_roles(self, task: str) -> Dict[str, float]:
        """
        Score every known role against *task*.

        Args:
            task: task description.

        Returns:
            Mapping of role name -> score (keyword hits x role weight;
            roles with no hits still get a small 0.1 baseline so every
            role appears in the result).
        """
        scores: Dict[str, float] = {}
        for name, cfg in self.ROLE_WEIGHTS.items():
            hits = cfg.matches(task)
            scores[name] = (hits if hits > 0 else 0.1) * cfg.weight
        return scores

    async def allocate_roles(
        self,
        task: str,
        available_agents: List[str]
    ) -> Dict[str, str]:
        """
        Assign a role to each available agent for the given task.

        Simplified strategy: rank roles by score, then hand them out to
        agents in order; agents beyond the role count default to developer.
        (A real implementation would consult the AgentRegistry.)

        Args:
            task: task description.
            available_agents: available agent IDs.

        Returns:
            Mapping of agent ID -> role name.
        """
        role_scores = self._analyze_task_roles(task)
        ranked = sorted(role_scores.items(), key=lambda item: -item[1])
        allocation: Dict[str, str] = {}
        for index, agent_id in enumerate(available_agents):
            chosen = ranked[index][0] if index < len(ranked) else "developer"
            allocation[agent_id] = chosen
        return allocation

    def get_primary_role(self, task: str) -> str:
        """
        Return the single highest-scoring role for the task.

        Args:
            task: task description.

        Returns:
            Role name ("developer" when no scores are available).
        """
        role_scores = self._analyze_task_roles(task)
        if not role_scores:
            return "developer"
        return max(role_scores, key=role_scores.get)

    async def suggest_agents_for_task(
        self,
        task: str,
        all_agents: List[AgentInfo],
        count: int = 3
    ) -> List[AgentInfo]:
        """
        Recommend the agents best suited for a task.

        Args:
            task: task description.
            all_agents: every available agent.
            count: number of agents to recommend.

        Returns:
            Up to *count* agents, best matches first.
        """
        primary = self.get_primary_role(task)
        ranked_agents = []
        for candidate in all_agents:
            # Exact role match scores highest; core roles score medium.
            if candidate.role == primary:
                rank = 10
            elif candidate.role in ("architect", "developer", "reviewer"):
                rank = 5
            else:
                rank = 1
            ranked_agents.append((candidate, rank))
        ranked_agents.sort(key=lambda pair: -pair[1])
        return [candidate for candidate, _ in ranked_agents[:count]]

    def explain_allocation(self, task: str, allocation: Dict[str, str]) -> str:
        """
        Render a human-readable explanation of an allocation.

        Args:
            task: task description.
            allocation: allocation result to explain.

        Returns:
            Multi-line explanation text.
        """
        role_scores = self._analyze_task_roles(task)
        primary = self.get_primary_role(task)
        report = [f"任务分析: {task}", f"主要角色: {primary}", "角色权重:"]
        for role, score in sorted(role_scores.items(), key=lambda item: -item[1]):
            report.append(f"  - {role}: {score:.2f}")
        report.append("分配结果:")
        for agent_id, role in allocation.items():
            report.append(f"  - {agent_id}: {role}")
        return "\n".join(report)
# Global singleton holder for the role allocator.
_allocator_instance: Optional[RoleAllocator] = None


def get_role_allocator() -> RoleAllocator:
    """Return the shared RoleAllocator, creating it on first use."""
    global _allocator_instance
    if _allocator_instance is not None:
        return _allocator_instance
    _allocator_instance = RoleAllocator()
    return _allocator_instance

View File

@@ -0,0 +1,146 @@
"""
基础存储服务 - 提供 JSON 文件的异步读写操作
所有服务共享的底层存储抽象
"""
import json
import asyncio
from pathlib import Path
from typing import Any, Dict, Optional
import aiofiles
import aiofiles.os
class StorageService:
    """Async JSON file storage service shared by all backend services."""

    def __init__(self, base_path: str = ".doc"):
        """
        Initialize the storage service.

        Args:
            base_path: base storage directory, defaults to .doc
        """
        self.base_path = Path(base_path)
        # Simple in-process lock guarding concurrent writes. Note it only
        # serializes write_json itself, not read-modify-write sequences.
        self._lock = asyncio.Lock()

    def _resolve(self, path: str) -> Path:
        """Resolve *path* against base_path unless it is already absolute."""
        p = Path(path)
        return p if p.is_absolute() else self.base_path / path

    async def ensure_dir(self, path: str) -> None:
        """
        Create the directory (and parents) if it does not exist.

        Args:
            path: directory path (relative to base_path, or absolute).
        """
        await aiofiles.os.makedirs(self._resolve(path), exist_ok=True)

    async def read_json(self, path: str) -> Dict[str, Any]:
        """
        Read a JSON file.

        Args:
            path: file path (relative to base_path, or absolute).

        Returns:
            Parsed JSON content; {} when the file is missing or empty.
            (If the file holds a JSON array, a list is returned despite
            the Dict annotation — callers should be prepared for that.)
        """
        file_path = self._resolve(path)
        if not await aiofiles.os.path.exists(file_path):
            return {}
        async with aiofiles.open(file_path, mode="r", encoding="utf-8") as f:
            content = await f.read()
        if not content.strip():
            return {}
        return json.loads(content)

    async def write_json(self, path: str, data: Dict[str, Any]) -> None:
        """
        Write a JSON file (creating parent directories as needed).

        Args:
            path: file path (relative to base_path, or absolute).
            data: JSON-serializable data to write.
        """
        file_path = self._resolve(path)
        # Make sure the parent directory exists before writing.
        await self.ensure_dir(str(file_path.parent))
        # Serialize writes to avoid interleaved/concurrent file writes.
        async with self._lock:
            async with aiofiles.open(file_path, mode="w", encoding="utf-8") as f:
                await f.write(json.dumps(data, ensure_ascii=False, indent=2))

    async def append_json_list(self, path: str, item: Any) -> None:
        """
        Append one item to a JSON list file stored as {"items": [...]}.

        Bug fix: the previous implementation discarded existing content on
        every call — it compared the dict returned by read_json against
        list, always reset it to [], and then wrote {"items": [item]}, so
        appends never accumulated. We now unwrap the "items" key (and still
        accept a legacy bare JSON array) before appending.

        Args:
            path: file path.
            item: item to append.
        """
        data = await self.read_json(path)
        if isinstance(data, list):
            # Legacy layout: the file held a bare JSON array.
            items = data
        elif isinstance(data, dict):
            items = data.get("items", [])
            if not isinstance(items, list):
                items = []
        else:
            items = []
        items.append(item)
        await self.write_json(path, {"items": items})

    async def delete(self, path: str) -> bool:
        """
        Delete a file.

        Args:
            path: file path.

        Returns:
            True when the file existed and was removed.
        """
        file_path = self._resolve(path)
        if await aiofiles.os.path.exists(file_path):
            await aiofiles.os.remove(file_path)
            return True
        return False

    async def exists(self, path: str) -> bool:
        """
        Check whether a file exists.

        Args:
            path: file path.

        Returns:
            True when the file exists.
        """
        return await aiofiles.os.path.exists(self._resolve(path))
# 全局单例实例
_storage_instance: Optional[StorageService] = None
def _find_project_root() -> Path:
"""查找项目根目录(包含 CLAUDE.md 的目录)"""
current = Path.cwd()
# 向上查找项目根目录
for parent in [current] + list(current.parents):
if (parent / "CLAUDE.md").exists():
return parent
# 如果找不到,使用当前目录的父目录(假设从 backend/ 运行)
if current.name == "backend":
return current.parent
# 默认使用当前目录
return current
def get_storage() -> StorageService:
"""获取存储服务单例,使用项目根目录下的 .doc 目录"""
global _storage_instance
if _storage_instance is None:
project_root = _find_project_root()
doc_path = project_root / ".doc"
_storage_instance = StorageService(str(doc_path))
return _storage_instance

View File

@@ -0,0 +1,473 @@
"""
工作流引擎 - 管理和执行工作流
支持从 YAML 文件加载工作流定义,并跟踪进度
"""
import asyncio
from pathlib import Path
from typing import Dict, List, Optional
from dataclasses import dataclass, field, asdict
from datetime import datetime
import yaml
from .storage import get_storage
from .meeting_recorder import get_meeting_recorder
@dataclass
class WorkflowMeeting:
    """One node in a workflow: a meeting or an execution step."""
    meeting_id: str
    title: str
    attendees: List[str]
    depends_on: List[str] = field(default_factory=list)
    completed: bool = False
    node_type: str = "meeting"  # meeting | execution
    min_required: Optional[int] = None  # minimum completions (execution nodes)
    on_failure: Optional[str] = None  # node ID to jump to on failure
    # Execution-state tracking (execution nodes only)
    completed_attendees: List[str] = field(default_factory=list)
    @property
    def is_ready(self) -> bool:
        """Whether this node may start (its dependencies are complete)."""
        # NOTE(review): this expression is a tautology — every `dep` drawn
        # from self.depends_on is trivially `in self.depends_on`, so the
        # property always returns True. Dependency *completion* is never
        # actually checked (the node has no view of sibling nodes); confirm
        # the intended semantics against WorkflowEngine before relying on it.
        return all(dep in self.depends_on for dep in self.depends_on)
    @property
    def is_execution_ready(self) -> bool:
        """Whether enough attendees have finished this execution node."""
        if self.node_type != "execution":
            return False
        # min_required == 0 (falsy) falls back to "everyone" — presumably
        # intended; verify whether an explicit 0 should mean "none required".
        required = self.min_required or len(self.attendees)
        return len(self.completed_attendees) >= required
    @property
    def missing_attendees(self) -> List[str]:
        """Attendees who have not yet finished (execution nodes only)."""
        if self.node_type != "execution":
            return []
        return [a for a in self.attendees if a not in self.completed_attendees]
    @property
    def progress(self) -> str:
        """Execution progress as "done/total"; "N/A" for meeting nodes."""
        if self.node_type != "execution":
            return "N/A"
        return f"{len(self.completed_attendees)}/{len(self.attendees)}"
@dataclass
class Workflow:
    """A workflow definition: an ordered list of meeting/execution nodes."""
    workflow_id: str
    name: str
    description: str
    meetings: List[WorkflowMeeting]
    created_at: str = ""        # ISO timestamp; stamped in __post_init__ when empty
    status: str = "pending"     # pending, in_progress, completed

    def __post_init__(self):
        # Stamp creation time lazily so callers may supply their own value.
        self.created_at = self.created_at or datetime.now().isoformat()

    @property
    def progress(self) -> str:
        """Summary like "2/5": completed nodes over total nodes."""
        done = sum(1 for node in self.meetings if node.completed)
        return f"{done}/{len(self.meetings)}"

    @property
    def current_meeting(self) -> Optional[WorkflowMeeting]:
        """The first node not yet completed, or None when all are done."""
        return next((node for node in self.meetings if not node.completed), None)

    @property
    def is_completed(self) -> bool:
        """True once every node in the workflow has been completed."""
        return not any(not node.completed for node in self.meetings)
class WorkflowEngine:
    """
    Workflow engine.

    Loads workflow definitions from YAML files, tracks meeting/execution-node
    progress, and supports conditional jumps between nodes.

    NOTE(review): progress lives only in self._loaded_workflows (in memory);
    it is not persisted, so a process restart loses in-flight state unless
    the YAML is reloaded.
    """
    # Directory (under the storage base path) holding workflow YAML files.
    WORKFLOWS_DIR = "workflow"
    def __init__(self):
        self._storage = get_storage()
        self._recorder = get_meeting_recorder()
        # workflow_id -> in-memory Workflow with live progress flags.
        self._loaded_workflows: Dict[str, Workflow] = {}
        # workflow_id -> source YAML path, so a workflow can be reloaded.
        self._workflow_files: Dict[str, str] = {}
    async def load_workflow(self, workflow_path: str) -> Workflow:
        """
        Load a workflow from a YAML file.

        Args:
            workflow_path: YAML file path (relative to .doc/workflow/).

        Returns:
            The loaded workflow (also cached in memory).

        Raises:
            ValueError: when the YAML file does not exist.
        """
        import aiofiles
        # Build the full path under the storage base directory.
        full_path = f"{self.WORKFLOWS_DIR}/{workflow_path}"
        yaml_path = Path(self._storage.base_path) / full_path
        if not yaml_path.exists():
            raise ValueError(f"Workflow file not found: {workflow_path}")
        # Read the YAML content.
        async with aiofiles.open(yaml_path, mode="r", encoding="utf-8") as f:
            yaml_content = await f.read()
        content = yaml.safe_load(yaml_content)
        # Parse node definitions into WorkflowMeeting records.
        meetings = []
        for m in content.get("meetings", []):
            meetings.append(WorkflowMeeting(
                meeting_id=m["meeting_id"],
                title=m["title"],
                attendees=m["attendees"],
                depends_on=m.get("depends_on", []),
                node_type=m.get("node_type", "meeting"),
                min_required=m.get("min_required"),
                on_failure=m.get("on_failure")
            ))
        workflow = Workflow(
            workflow_id=content["workflow_id"],
            name=content["name"],
            description=content.get("description", ""),
            meetings=meetings
        )
        self._loaded_workflows[workflow.workflow_id] = workflow
        # Remember the source path so the workflow can be reloaded later.
        self._workflow_files[workflow.workflow_id] = workflow_path
        return workflow
    async def get_next_meeting(self, workflow_id: str) -> Optional[WorkflowMeeting]:
        """
        Get the next meeting that should take place in a workflow.

        Args:
            workflow_id: workflow ID.

        Returns:
            The first incomplete meeting, or None if unknown/finished.
        """
        workflow = self._loaded_workflows.get(workflow_id)
        if not workflow:
            return None
        return workflow.current_meeting
    async def complete_meeting(self, workflow_id: str, meeting_id: str) -> bool:
        """
        Mark a meeting as completed.

        Args:
            workflow_id: workflow ID.
            meeting_id: meeting ID.

        Returns:
            True when the meeting was found and marked.
        """
        workflow = self._loaded_workflows.get(workflow_id)
        if not workflow:
            return False
        for meeting in workflow.meetings:
            if meeting.meeting_id == meeting_id:
                meeting.completed = True
                # Refresh the overall workflow status.
                if workflow.is_completed:
                    workflow.status = "completed"
                else:
                    workflow.status = "in_progress"
                return True
        return False
    async def create_workflow_meeting(
        self,
        workflow_id: str,
        meeting_id: str
    ) -> bool:
        """
        Create the meeting record for a workflow node (via the recorder).

        Args:
            workflow_id: workflow ID.
            meeting_id: meeting ID.

        Returns:
            True when the record was created.
        """
        workflow = self._loaded_workflows.get(workflow_id)
        if not workflow:
            return False
        meeting = None
        for m in workflow.meetings:
            if m.meeting_id == meeting_id:
                meeting = m
                break
        if not meeting:
            return False
        # Create the meeting record with the standard discussion steps.
        await self._recorder.create_meeting(
            meeting_id=meeting.meeting_id,
            title=f"{workflow.name}: {meeting.title}",
            attendees=meeting.attendees,
            steps=["收集初步想法", "讨论与迭代", "生成共识版本"]
        )
        return True
    async def get_workflow_status(self, workflow_id: str) -> Optional[Dict]:
        """
        Get a workflow's status summary.

        Args:
            workflow_id: workflow ID.

        Returns:
            Status info dict, or None for an unknown workflow.
        """
        # If not in memory but previously registered, reload from its YAML.
        if workflow_id not in self._loaded_workflows and workflow_id in self._workflow_files:
            await self.load_workflow(self._workflow_files[workflow_id])
        workflow = self._loaded_workflows.get(workflow_id)
        if not workflow:
            return None
        return {
            "workflow_id": workflow.workflow_id,
            "name": workflow.name,
            "description": workflow.description,
            "status": workflow.status,
            "progress": workflow.progress,
            "meetings": [
                {
                    "meeting_id": m.meeting_id,
                    "title": m.title,
                    "completed": m.completed
                }
                for m in workflow.meetings
            ]
        }
    async def list_workflows(self) -> List[Dict]:
        """
        List every loaded workflow.

        Returns:
            List of workflow summaries.
        """
        return [
            {
                "workflow_id": w.workflow_id,
                "name": w.name,
                "status": w.status,
                "progress": w.progress
            }
            for w in self._loaded_workflows.values()
        ]
    # ========== Execution-node methods ==========
    async def join_execution_node(
        self,
        workflow_id: str,
        meeting_id: str,
        agent_id: str
    ) -> Dict:
        """
        Record that an agent finished an execution node.

        Args:
            workflow_id: workflow ID.
            meeting_id: execution-node ID.
            agent_id: Agent ID.

        Returns:
            Status info: {"status": "waiting"|"ready"|"error", "progress": "2/3", ...}
        """
        workflow = self._loaded_workflows.get(workflow_id)
        if not workflow:
            return {"status": "error", "message": "Workflow not found"}
        for meeting in workflow.meetings:
            if meeting.meeting_id == meeting_id:
                if meeting.node_type != "execution":
                    return {"status": "error", "message": "Not an execution node"}
                # Idempotent: joining twice does not double-count.
                if agent_id not in meeting.completed_attendees:
                    meeting.completed_attendees.append(agent_id)
                if meeting.is_execution_ready:
                    return {
                        "status": "ready",
                        "progress": meeting.progress,
                        "message": "所有 Agent 已完成,可以进入下一节点"
                    }
                return {
                    "status": "waiting",
                    "progress": meeting.progress,
                    "missing": meeting.missing_attendees,
                    "message": f"等待其他 Agent 完成: {meeting.missing_attendees}"
                }
        return {"status": "error", "message": "Meeting not found"}
    async def get_execution_status(
        self,
        workflow_id: str,
        meeting_id: str
    ) -> Optional[Dict]:
        """
        Get an execution node's status.

        Returns:
            Execution status info, or None when workflow/node is unknown.
        """
        workflow = self._loaded_workflows.get(workflow_id)
        if not workflow:
            return None
        for meeting in workflow.meetings:
            if meeting.meeting_id == meeting_id:
                return {
                    "meeting_id": meeting.meeting_id,
                    "title": meeting.title,
                    "node_type": meeting.node_type,
                    "attendees": meeting.attendees,
                    "completed_attendees": meeting.completed_attendees,
                    "progress": meeting.progress,
                    "is_ready": meeting.is_execution_ready,
                    "missing": meeting.missing_attendees
                }
        return None
    # ========== Conditional-jump methods ==========
    async def jump_to_node(
        self,
        workflow_id: str,
        target_meeting_id: str
    ) -> bool:
        """
        Force a jump to the given node (resetting it and every later node).

        Args:
            workflow_id: workflow ID.
            target_meeting_id: target node ID.

        Returns:
            True when the target node was found and the jump applied.
        """
        workflow = self._loaded_workflows.get(workflow_id)
        if not workflow:
            return False
        # Reset the target node and everything after it (list order defines
        # "after"; earlier nodes keep their completed state).
        target_found = False
        for meeting in workflow.meetings:
            if meeting.meeting_id == target_meeting_id:
                target_found = True
                meeting.completed = False
                meeting.completed_attendees = []
            elif target_found:
                # Every node after the target is reset as well.
                meeting.completed = False
                meeting.completed_attendees = []
        workflow.status = "in_progress"
        return target_found
    async def handle_failure(
        self,
        workflow_id: str,
        meeting_id: str
    ) -> Optional[str]:
        """
        Handle a node failure by jumping per its on_failure configuration.

        Args:
            workflow_id: workflow ID.
            meeting_id: ID of the failed node.

        Returns:
            The jump target node ID, or None when no on_failure is set.
        """
        workflow = self._loaded_workflows.get(workflow_id)
        if not workflow:
            return None
        for meeting in workflow.meetings:
            if meeting.meeting_id == meeting_id and meeting.on_failure:
                await self.jump_to_node(workflow_id, meeting.on_failure)
                return meeting.on_failure
        return None
    async def get_workflow_detail(self, workflow_id: str) -> Optional[Dict]:
        """
        Get full workflow detail, including the state of every node.

        Returns:
            Detailed workflow info, or None for an unknown workflow.
        """
        workflow = self._loaded_workflows.get(workflow_id)
        if not workflow:
            return None
        return {
            "workflow_id": workflow.workflow_id,
            "name": workflow.name,
            "description": workflow.description,
            "status": workflow.status,
            "progress": workflow.progress,
            "current_node": workflow.current_meeting.meeting_id if workflow.current_meeting else None,
            "meetings": [
                {
                    "meeting_id": m.meeting_id,
                    "title": m.title,
                    "node_type": m.node_type,
                    "attendees": m.attendees,
                    "depends_on": m.depends_on,
                    "completed": m.completed,
                    "on_failure": m.on_failure,
                    "progress": m.progress if m.node_type == "execution" else None
                }
                for m in workflow.meetings
            ]
        }
# Global singleton holder for the workflow engine.
_engine_instance: Optional[WorkflowEngine] = None


def get_workflow_engine() -> WorkflowEngine:
    """Return the shared WorkflowEngine, creating it on first use."""
    global _engine_instance
    if _engine_instance is not None:
        return _engine_instance
    _engine_instance = WorkflowEngine()
    return _engine_instance