- 新增 CLIPluginAdapter 统一接口 (backend/app/core/agent_adapter.py) - 新增 LLM 服务层,支持 Anthropic/OpenAI/DeepSeek/Ollama (backend/app/services/llm_service.py) - 新增 Agent 执行引擎,支持文件锁自动管理 (backend/app/services/agent_executor.py) - 新增 NativeLLMAgent 原生 LLM 适配器 (backend/app/adapters/native_llm_agent.py) - 新增进程管理器 (backend/app/services/process_manager.py) - 新增 Agent 控制 API (backend/app/routers/agents_control.py) - 新增 WebSocket 实时通信 (backend/app/routers/websocket.py) - 更新前端 AgentsPage,支持启动/停止 Agent - 测试通过:Agent 启动、批量操作、栅栏同步 Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
487 lines
15 KiB
Python
487 lines
15 KiB
Python
"""
|
|
Agent 执行引擎
|
|
|
|
负责协调 LLM 调用和资源管理,提供声明式的任务执行接口。
|
|
自动管理文件锁、心跳更新等生命周期。
|
|
"""
|
|
|
|
import asyncio
import json
import logging
import re
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional

from .llm_service import ModelRouter, LLMMessage, TaskType
from .storage import get_storage
from .file_lock import get_file_lock_service
from .heartbeat import get_heartbeat_service
from .agent_registry import get_agent_registry, AgentInfo
from ..core.agent_adapter import Task, Result
|
|
|
|
# Module-level logger, standard `logging.getLogger(__name__)` pattern.
logger = logging.getLogger(__name__)
|
|
|
|
|
|
@dataclass
class ExecutionPlan:
    """Structured plan describing how a task should be executed."""
    steps: List[str] = field(default_factory=list)  # ordered execution steps
    required_files: List[str] = field(default_factory=list)  # files the task is expected to touch
    estimated_duration: str = ""  # human-readable estimate (free-form text)
    complexity: str = "medium"  # expected values: "simple" | "medium" | "complex"
    requires_code_execution: bool = False  # whether the plan needs code to be run
    subtasks: List[str] = field(default_factory=list)  # decomposed subtasks, if any
|
|
|
|
|
|
@dataclass
class ExecutionContext:
    """Mutable per-task execution state threaded through the executor.

    One instance is created per ``execute_task`` call and passed to every
    internal step (lock handling, message building, LLM call, result shaping).
    """

    agent_id: str  # id of the agent executing the task
    agent_role: str  # role name, e.g. "developer"; upper-cased for lock ownership
    agent_model: str  # preferred LLM model for this agent
    task_id: str  # id of the task being executed
    acquired_locks: List[str] = field(default_factory=list)  # file paths we actually locked
    start_time: float = 0  # epoch seconds when execution started
    messages: List[LLMMessage] = field(default_factory=list)  # conversation sent to the LLM
    metadata: Dict[str, Any] = field(default_factory=dict)  # caller-supplied extra context
    # BUG FIX: previously assigned dynamically in execute_task() only, so any
    # other code path reading it would hit AttributeError. Declared as a real
    # field (appended last with a default, so positional callers are unaffected).
    required_files: List[str] = field(default_factory=list)  # files detected from the task text
|
|
|
|
|
|
class AgentExecutor:
    """
    Agent task execution engine.

    Responsibilities:
    1. Task analysis   - parse the task description and spot referenced files
    2. Plan generation - ask the LLM for an execution plan
    3. Resource mgmt   - automatically acquire and release file locks
    4. Task execution  - drive the LLM to carry out the task
    5. Result shaping  - format the output and update agent status
    """

    # Pre-compiled patterns used to spot file-like paths in a task description.
    # Compiled once at class-creation time (hoisted out of the per-call loop);
    # a previously duplicated pattern (subset of the first) has been removed —
    # matches were deduplicated through a set anyway, so behavior is unchanged.
    _FILE_PATTERNS = [
        re.compile(p) for p in (
            r'[a-zA-Z_/\\][a-zA-Z0-9_/\\]*\.(?:py|js|ts|tsx|jsx|java|go|rs|c|h|cpp|hpp|css|html|md|json|yaml|yml)',
            r'src/[a-zA-Z0-9_/\\]*',
            r'app/[a-zA-Z0-9_/\\]*',
            r'components/[a-zA-Z0-9_/\\]*',
            r'pages/[a-zA-Z0-9_/\\]*',
            r'services/[a-zA-Z0-9_/\\]*',
            r'utils/[a-zA-Z0-9_/\\]*',
        )
    ]

    def __init__(
        self,
        llm_service: ModelRouter = None,
        storage=None,
        lock_service=None,
        heartbeat_service=None,
        registry=None
    ):
        """Wire up collaborating services, defaulting to the process-wide singletons."""
        self.llm = llm_service
        self.storage = storage or get_storage()
        self.locks = lock_service or get_file_lock_service()
        self.heartbeat = heartbeat_service or get_heartbeat_service()
        self.registry = registry or get_agent_registry()

        # Base directory used to resolve file paths referenced by tasks.
        self.work_dir = Path.cwd()

    async def execute_task(
        self,
        agent: AgentInfo,
        task: Task,
        context: Dict[str, Any] = None
    ) -> Result:
        """
        Main entry point for executing a task.

        Automatically manages:
        1. file-lock acquisition and release
        2. heartbeat updates
        3. task progress tracking
        4. error handling and recovery

        Returns a ``Result``; on failure ``success`` is False and ``error``
        carries the exception text. Locks are always released in ``finally``.
        """
        execution_context = ExecutionContext(
            agent_id=agent.agent_id,
            agent_role=agent.role,
            agent_model=agent.model,
            task_id=task.task_id,
            start_time=time.time(),
            metadata=context or {}
        )

        try:
            # 1. Heartbeat: execution started.
            await self.heartbeat.update_heartbeat(
                agent.agent_id,
                "working",
                task.description[:100],  # truncate overly long descriptions
                0
            )

            # 2. Analyse the task and identify the files it needs.
            execution_context.required_files = await self._analyze_required_files(
                task.description
            )

            # 3. Acquire file locks (best effort; see _acquire_locks).
            await self._acquire_locks(execution_context)

            # 4. Build the LLM conversation for this task.
            execution_context.messages = await self._build_messages(
                agent, task, execution_context
            )

            # 5. Call the LLM to execute the task.
            llm_response = await self._call_llm(execution_context)

            # 6. Shape the result.
            result = await self._process_result(llm_response, execution_context)

            # 7. Heartbeat: finished, back to idle.
            await self.heartbeat.update_heartbeat(agent.agent_id, "idle", "", 100)

            return result

        except Exception as e:
            logger.error(f"任务执行失败: {e}", exc_info=True)

            # Report the error state, but never let a heartbeat failure mask
            # the original exception (previously an exception here would
            # replace the real error in the returned Result).
            try:
                await self.heartbeat.update_heartbeat(
                    agent.agent_id,
                    "error",
                    str(e),
                    0
                )
            except Exception as hb_err:
                logger.warning(f"心跳更新失败: {hb_err}")

            return Result(
                success=False,
                output="",
                error=str(e),
                execution_time=time.time() - execution_context.start_time
            )

        finally:
            # 8. Always release every lock we managed to acquire.
            await self._release_locks(execution_context)

    async def _analyze_required_files(self, task_description: str) -> List[str]:
        """
        Identify the files a task refers to.

        Uses the pre-compiled regex patterns to pick file-like paths out of
        the free-form description. Candidates are sorted so the result — and
        therefore downstream lock-acquisition order — is deterministic
        (set iteration order previously made it vary between runs).
        """
        candidates = set()
        for pattern in self._FILE_PATTERNS:
            candidates.update(pattern.findall(task_description))

        # Normalise paths: forward slashes only, collapse duplicate slashes.
        normalized_files: List[str] = []
        for raw in sorted(candidates):
            path = re.sub(r'/+', '/', raw.replace('\\', '/'))
            if path not in normalized_files:
                normalized_files.append(path)

        logger.debug(f"识别到文件: {normalized_files}")
        return normalized_files

    async def _acquire_locks(self, context: ExecutionContext) -> None:
        """Acquire a lock for every required file (best effort).

        A failed acquisition is only logged — the task still proceeds without
        that lock, matching the original design.
        """
        for file_path in context.required_files:
            success = await self.locks.acquire_lock(
                file_path,
                context.agent_id,
                context.agent_role.upper()
            )
            if success:
                context.acquired_locks.append(file_path)
                logger.debug(f"获取锁成功: {file_path}")
            else:
                logger.warning(f"获取锁失败: {file_path} (可能被其他 Agent 占用)")

    async def _release_locks(self, context: ExecutionContext) -> None:
        """Release every lock we acquired; failures are logged, never raised."""
        for file_path in context.acquired_locks:
            try:
                await self.locks.release_lock(file_path, context.agent_id)
                logger.debug(f"释放锁: {file_path}")
            except Exception as e:
                logger.warning(f"释放锁失败: {file_path}: {e}")

    async def _build_messages(
        self,
        agent: AgentInfo,
        task: Task,
        context: ExecutionContext
    ) -> List[LLMMessage]:
        """Build the LLM message list: system prompt, file context, task, extras."""
        messages = []

        # System prompt describing the agent's role.
        system_prompt = self._build_system_prompt(agent, context)
        messages.append(LLMMessage(role="system", content=system_prompt))

        # File context: names plus (truncated) contents when readable.
        if context.required_files:
            context_info = f"\n相关文件: {', '.join(context.required_files)}\n"
            file_contents = await self._read_file_contents(context.required_files)
            if file_contents:
                context_info += f"\n文件内容:\n{file_contents}\n"
            messages.append(LLMMessage(role="user", content=context_info))

        # The task itself.
        messages.append(LLMMessage(role="user", content=task.description))

        # Caller-supplied extra context, serialised as pretty-printed JSON.
        # BUG FIX: `json` was used here without ever being imported at module
        # level (it was only imported locally in create_execution_plan), so
        # any task with `context` set raised NameError.
        if task.context:
            context_str = f"\n额外上下文:\n{json.dumps(task.context, ensure_ascii=False, indent=2)}\n"
            messages.append(LLMMessage(role="user", content=context_str))

        return messages

    def _build_system_prompt(self, agent: AgentInfo, context: ExecutionContext) -> str:
        """Compose the system prompt: shared base text plus a role-specific suffix."""
        role_prompts = {
            "architect": """
你是一个系统架构师。你擅长:
- 系统设计和模块划分
- 技术选型和架构决策
- 接口设计和数据流规划
- 性能优化和扩展性考虑

请给出清晰、完整的架构方案。
""",
            "pm": """
你是一个产品经理。你擅长:
- 需求分析和用户故事
- 功能优先级排序
- 产品规划
- 用户体验考虑

请从用户角度分析需求。
""",
            "developer": """
你是一个高级开发工程师。你擅长:
- 编写高质量、可维护的代码
- 遵循最佳实践和编码规范
- 考虑边界情况和错误处理
- 编写清晰的注释和文档

请给出可以直接使用的代码实现。
""",
            "reviewer": """
你是一个代码审查专家。你擅长:
- 发现代码中的潜在问题
- 安全漏洞检测
- 性能问题识别
- 代码风格和可读性改进

请给出详细的审查意见。
""",
            "qa": """
你是一个测试工程师。你擅长:
- 编写全面的测试用例
- 边界条件测试
- 自动化测试
- 测试策略制定

请给出完整的测试方案。
"""
        }

        base_prompt = f"""你是 {agent.name},一个 AI 编程助手。

当前任务 ID: {context.task_id}
你的角色: {agent.role}
使用的模型: {agent.model}

工作原则:
1. 仔细理解任务需求
2. 给出清晰、具体的回答
3. 如果涉及代码,确保代码正确且可运行
4. 考虑边界情况和错误处理
5. 必要时给出解释和说明
"""

        # Unknown roles simply get the base prompt with no suffix.
        role_prompt = role_prompts.get(agent.role, "")

        return base_prompt + role_prompt

    async def _read_file_contents(self, file_paths: List[str]) -> str:
        """Read up to three referenced files (if present), truncating long ones."""
        contents = []
        for file_path in file_paths[:3]:  # cap file count to keep the prompt small
            full_path = self.work_dir / file_path
            if full_path.exists():
                try:
                    content = full_path.read_text(encoding='utf-8')
                    # Cap each file's contribution to the prompt.
                    if len(content) > 2000:
                        content = content[:2000] + "\n... (文件过长,已截断)"
                    contents.append(f"### {file_path}\n```\n{content}\n```")
                except Exception as e:
                    logger.warning(f"读取文件失败: {file_path}: {e}")

        return "\n\n".join(contents)

    async def _call_llm(self, context: ExecutionContext) -> str:
        """Invoke the LLM for the built conversation; mock when unconfigured."""
        if not self.llm:
            # No LLM service configured — return a canned response instead.
            return await self._mock_llm_response(context)

        # NOTE(review): when task.context was supplied, messages[-1] is the
        # extra-context message rather than the task description — confirm
        # this is what route_task expects as `task`.
        response = await self.llm.route_task(
            task=context.messages[-1].content,
            messages=context.messages,
            preferred_model=context.agent_model
        )

        logger.info(f"LLM 调用完成: {response.provider}/{response.model}, "
                    f"tokens: {response.tokens_used}, latency: {response.latency:.2f}s")

        return response.content

    async def _mock_llm_response(self, context: ExecutionContext) -> str:
        """Return a canned response (used for testing without an LLM)."""
        await asyncio.sleep(0.5)  # simulate network latency
        return f"""[模拟响应]

作为 {context.agent_role},我对任务的分析如下:

任务需要处理的文件: {', '.join(context.required_files) or '无'}

## 分析

这是一个模拟响应,表示系统正在正常工作。

## 建议

1. 配置 LLM API 密钥以启用真实 AI 能力
2. 在环境变量中设置 ANTHROPIC_API_KEY 或 DEEPSEEK_API_KEY
3. 重启服务后即可使用完整功能

---
*Agent ID: {context.agent_id}*
*任务 ID: {context.task_id}*
"""

    async def _process_result(
        self,
        llm_output: str,
        context: ExecutionContext
    ) -> Result:
        """Wrap raw LLM output in a Result with execution metadata attached."""
        execution_time = time.time() - context.start_time

        return Result(
            success=True,
            output=llm_output,
            metadata={
                "agent_id": context.agent_id,
                "agent_role": context.agent_role,
                "agent_model": context.agent_model,
                "task_id": context.task_id,
                "required_files": context.required_files,
                "acquired_locks": context.acquired_locks,
                "execution_time": execution_time
            },
            execution_time=execution_time
        )

    async def create_execution_plan(
        self,
        agent: AgentInfo,
        task: str
    ) -> ExecutionPlan:
        """
        Create an execution plan for a task.

        Asks the LLM to return a JSON plan; falls back to a canned plan when
        no LLM is configured or the response cannot be parsed.
        """
        if not self.llm:
            return self._create_mock_plan(task)

        plan_prompt = f"""
请分析以下任务,生成执行计划。

任务: {task}

请返回 JSON 格式的执行计划,包含:
{{
    "steps": ["步骤1", "步骤2", ...],
    "required_files": ["file1.py", "file2.js", ...],
    "estimated_duration": "预计时间",
    "complexity": "simple|medium|complex",
    "requires_code_execution": true/false,
    "subtasks": ["子任务1", "子任务2", ...]
}}
"""

        try:
            response = await self.llm.route_task(
                task=plan_prompt,
                messages=[LLMMessage(role="user", content=plan_prompt)]
            )

            # BUG FIX: route_task returns a response object (see _call_llm,
            # which reads response.content) — the old code passed the object
            # itself to json.loads, so parsing ALWAYS failed into the mock
            # fallback. Parse .content, and tolerate prose/markdown fences
            # around the JSON by extracting the outermost {...} span.
            content = getattr(response, "content", response)
            match = re.search(r'\{.*\}', content, re.DOTALL)
            plan_data = json.loads(match.group(0) if match else content)

            return ExecutionPlan(
                steps=plan_data.get("steps", []),
                required_files=plan_data.get("required_files", []),
                estimated_duration=plan_data.get("estimated_duration", ""),
                complexity=plan_data.get("complexity", "medium"),
                requires_code_execution=plan_data.get("requires_code_execution", False),
                subtasks=plan_data.get("subtasks", [])
            )
        except Exception as e:
            logger.warning(f"解析执行计划失败: {e}")
            return self._create_mock_plan(task)

    def _create_mock_plan(self, task: str) -> ExecutionPlan:
        """Return a generic placeholder plan (no LLM available)."""
        return ExecutionPlan(
            steps=[
                "1. 分析任务需求",
                "2. 查看相关文件",
                "3. 制定实现方案",
                "4. 执行实现"
            ],
            estimated_duration="5-10 分钟",
            complexity="medium"
        )
|
|
|
|
|
|
# Process-wide executor singleton, created lazily on first access.
_executor: Optional[AgentExecutor] = None


def get_agent_executor(llm_service: ModelRouter = None) -> AgentExecutor:
    """Return the shared AgentExecutor, creating it on first use.

    Note that ``llm_service`` only takes effect on the very first call;
    subsequent calls return the already-built instance unchanged.
    """
    global _executor
    if _executor is not None:
        return _executor
    _executor = AgentExecutor(llm_service=llm_service)
    return _executor
|
|
|
|
|
|
def reset_agent_executor():
    """Reset the executor singleton (primarily for test isolation)."""
    global _executor
    _executor = None
|