Refactor API routes and add workflow orchestration

Backend:
- Refactor the agents, heartbeats, locks, meetings, resources, roles, and workflows routes
- Add orchestrator and providers routes
- Add a CLI invoker and a workflow orchestration service
- Add logging configuration and dependencies

Frontend:
- Update the AgentsPage, SettingsPage, and WorkflowPage pages
- Extend api.ts with new API endpoints

Other:
- Remove test agent data files
- Add a sample workflow and a project audit report

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
@@ -0,0 +1,279 @@
"""
CLI invoker

Calls real AI CLI tools (Claude Code / Kimi CLI / OpenCode) in a
subprocess, sending a prompt to the CLI and capturing its output.

Supported CLIs:
- claude: Claude Code CLI (single-turn prompt via the -p flag)
- kimi: Kimi CLI (single-turn prompt via the -p flag)
- opencode: OpenCode CLI
"""

import asyncio
import logging
import os
import shutil
import time
from dataclasses import dataclass
from typing import Optional

logger = logging.getLogger(__name__)


# CLI registry: model-name prefix → binary name
CLI_REGISTRY = {
    "claude": "claude",
    "kimi": "kimi",
    "opencode": "opencode",
}


@dataclass
class CLIResult:
    """Result of one CLI invocation."""
    content: str
    cli_name: str
    exit_code: int
    latency: float
    success: bool
    error: str = ""


def detect_available_clis() -> dict:
    """Detect which of the registered CLI tools are installed on this system."""
    available = {}
    for name, binary in CLI_REGISTRY.items():
        path = shutil.which(binary)
        if path:
            available[name] = path
    return available
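
# Usage sketch (output is illustrative; the exact paths depend on the host):
#
#   >>> detect_available_clis()
#   {'claude': '/usr/local/bin/claude'}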


def resolve_cli(model: str) -> Optional[str]:
    """
    Decide which CLI to use based on an agent's model field.

    Rules:
    - starts with "claude" → claude CLI
    - starts with "kimi" → kimi CLI
    - starts with "opencode" → opencode CLI
    - exact match of a CLI name → use it directly
    """
    model_lower = model.lower().strip()

    for prefix in CLI_REGISTRY:
        if model_lower.startswith(prefix):
            return prefix

    # Exact-name matches are already covered by the prefix loop above;
    # this branch is kept for explicitness.
    if model_lower in CLI_REGISTRY:
        return model_lower

    return None
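
# Resolution sketch (model names are illustrative):
#
#   >>> resolve_cli("claude-sonnet-4")
#   'claude'
#   >>> resolve_cli("gpt-4o")   # no registered prefix
#   None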


async def invoke_cli(
    cli_name: str,
    prompt: str,
    timeout: int = 120,
    max_tokens: int = 1024,
    system_prompt: str = "",
) -> CLIResult:
    """
    Invoke the given CLI tool and return its result.

    Args:
        cli_name: CLI name (claude / kimi / opencode)
        prompt: the prompt to send
        timeout: timeout in seconds
        max_tokens: maximum token count (accepted for API symmetry;
            not currently forwarded to any CLI)
        system_prompt: optional system prompt, prepended by _build_command
    """
    binary = CLI_REGISTRY.get(cli_name)
    if not binary:
        return CLIResult(
            content="", cli_name=cli_name, exit_code=-1,
            latency=0, success=False, error=f"Unknown CLI: {cli_name}"
        )

    # Resolve the full path; otherwise the subprocess may fail to find
    # the binary in some environments.
    full_path = shutil.which(binary)
    if not full_path:
        return CLIResult(
            content="", cli_name=cli_name, exit_code=-1,
            latency=0, success=False, error=f"CLI not installed: {binary}"
        )

    cmd = _build_command(cli_name, prompt, max_tokens, full_path, system_prompt)
    logger.info(f"Invoking CLI [{cli_name}]: {full_path} (prompt length={len(prompt)})")

    # On Windows, set PYTHONIOENCODING to avoid GBK encoding issues.
    env = dict(os.environ)
    env["PYTHONIOENCODING"] = "utf-8"

    start = time.time()

    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            env=env,
        )

        try:
            # Close stdin immediately so the CLI cannot block waiting for input.
            stdout, stderr = await asyncio.wait_for(
                proc.communicate(input=b""), timeout=timeout
            )
        except asyncio.TimeoutError:
            proc.kill()
            await proc.communicate()
            return CLIResult(
                content="", cli_name=cli_name, exit_code=-1,
                latency=time.time() - start, success=False,
                error=f"CLI timed out ({timeout}s)"
            )

        latency = time.time() - start
        stdout_text = stdout.decode("utf-8", errors="replace").strip()
        stderr_text = stderr.decode("utf-8", errors="replace").strip()

        # Strip OpenCode's INFO log lines and kimi's box-drawing frames.
        stdout_text = _clean_output(cli_name, stdout_text)

        if proc.returncode == 0 and stdout_text:
            logger.info(f"CLI [{cli_name}] done: {latency:.1f}s, {len(stdout_text)} chars")
            return CLIResult(
                content=stdout_text,
                cli_name=cli_name,
                exit_code=0,
                latency=round(latency, 2),
                success=True,
            )
        else:
            error_msg = stderr_text or f"exit code {proc.returncode}"
            logger.warning(f"CLI [{cli_name}] failed: {error_msg}")
            return CLIResult(
                content=stdout_text or "",
                cli_name=cli_name,
                exit_code=proc.returncode or -1,
                latency=round(latency, 2),
                success=False,
                error=error_msg,
            )

    except FileNotFoundError:
        return CLIResult(
            content="", cli_name=cli_name, exit_code=-1,
            latency=0, success=False, error=f"Command not found: {binary}"
        )
    except Exception as e:
        return CLIResult(
            content="", cli_name=cli_name, exit_code=-1,
            latency=time.time() - start, success=False, error=str(e)
        )
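
# Invocation sketch (assumes the claude CLI is installed; run inside an event loop):
#
#   result = await invoke_cli("claude", "Say hello in one sentence.", timeout=60)
#   if result.success:
#       print(result.content)
#   else:
#       print("failed:", result.error)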


def _build_command(
    cli_name: str, prompt: str, max_tokens: int, full_path: str, system_prompt: str = ""
) -> list:
    """
    Build the command-line arguments for each CLI.

    Uses the full binary path for cross-environment compatibility.
    """
    default_sys = (
        "This is a role-play discussion, not a programming task. "
        "Answer directly in Chinese; do not use any tools, read files, or run code. "
        "Give your in-character opinion and suggestions in 2-3 sentences."
    )
    sys_prompt = system_prompt or default_sys

    if cli_name == "claude":
        return [
            full_path,
            "-p", prompt,
            "--output-format", "text",
            "--system-prompt", sys_prompt,
        ]
    elif cli_name == "kimi":
        return [
            full_path,
            "-p", f"{sys_prompt}\n\n{prompt}",
        ]
    elif cli_name == "opencode":
        return [
            full_path,
            "run", f"{sys_prompt}\n\n{prompt}",
            "--model", "opencode/minimax-m2.5-free",
        ]
    else:
        return [full_path, "-p", prompt]


def _clean_output(cli_name: str, text: str) -> str:
    """Strip box-drawing frames, log lines, and prompt echoes from CLI output."""
    if cli_name == "kimi":
        return _clean_kimi_output(text)

    lines = text.splitlines()
    cleaned = []
    for line in lines:
        if line.strip().startswith("INFO "):
            continue
        cleaned.append(line)
    result = "\n".join(cleaned).strip()
    return result if result else text.strip()


def _clean_kimi_output(text: str) -> str:
    """
    Kimi CLI output format:
        ┌─────────────────────┐
        │ (prompt echo)       │
        └─────────────────────┘
        • thinking...
        • actual reply

    We need to: 1) drop the frame and the echoed prompt inside it,
    2) keep only the last bullet as the actual reply.
    """
    lines = text.splitlines()

    # Find where the frame ends (the last line made of └ or ╰ characters).
    box_end = -1
    for i, line in enumerate(lines):
        stripped = line.strip()
        if stripped and stripped[0] in "└╰" and all(
            c in "└┘─╰╯ " for c in stripped
        ):
            box_end = i

    # Skip the framed region.
    content_lines = lines[box_end + 1:] if box_end >= 0 else lines

    # Kimi prints both its thinking and its final reply as • bullets;
    # the last bullet block is usually the actual reply.
    bullets = []
    current_bullet = []
    for line in content_lines:
        stripped = line.strip()
        if not stripped:
            if current_bullet:
                current_bullet.append(line)
            continue
        if stripped.startswith("• ") or stripped.startswith("? "):
            if current_bullet:
                bullets.append("\n".join(current_bullet))
            current_bullet = [stripped.lstrip("•? ").strip()]
        elif current_bullet:
            current_bullet.append(stripped)

    if current_bullet:
        bullets.append("\n".join(current_bullet))

    if not bullets:
        return text.strip()

    # The last bullet is the actual reply.
    return bullets[-1].strip()
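

# Minimal manual smoke test (a sketch, not part of the original API surface;
# assumes at least one registered CLI is installed):
if __name__ == "__main__":
    async def _demo():
        clis = detect_available_clis()
        print("available CLIs:", clis)
        if clis:
            name = next(iter(clis))
            result = await invoke_cli(name, "Reply with one short sentence.", timeout=60)
            print(result.success, result.latency, result.content or result.error)

    asyncio.run(_demo())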
@@ -0,0 +1,572 @@
"""
Workflow orchestrator

Chains WorkflowEngine + MeetingRecorder + LLM Service to drive a whole
workflow automatically: load → execute node by node → record the
discussion → reach a consensus.

Two modes are supported:
- With an LLM API key: call real models to generate the discussion
- Without an API key: fall back to the built-in mock while still
  running the full flow
"""

import asyncio
import logging
import time
import uuid
from typing import Dict, List, Optional
from dataclasses import dataclass, field, asdict
from enum import Enum

from .workflow_engine import get_workflow_engine, WorkflowMeeting
from .meeting_recorder import get_meeting_recorder
from .agent_registry import get_agent_registry, AgentInfo
from .heartbeat import get_heartbeat_service
from .llm_service import get_llm_service, LLMMessage
from .cli_invoker import resolve_cli, invoke_cli, detect_available_clis

logger = logging.getLogger(__name__)


class OrchestratorStatus(str, Enum):
    IDLE = "idle"
    RUNNING = "running"
    PAUSED = "paused"
    COMPLETED = "completed"
    FAILED = "failed"


@dataclass
class AgentTurn:
    """A single agent utterance."""
    agent_id: str
    agent_name: str
    role: str
    model: str
    content: str
    timestamp: float
    latency: float = 0.0
    tokens_used: int = 0
    is_mock: bool = False


@dataclass
class MeetingResult:
    """The complete result of one meeting."""
    meeting_id: str
    title: str
    node_type: str
    turns: List[AgentTurn] = field(default_factory=list)
    consensus: str = ""
    started_at: float = 0
    finished_at: float = 0
    status: str = "pending"


@dataclass
class OrchestrationRun:
    """The full state of one orchestration run."""
    run_id: str
    workflow_id: str
    workflow_name: str
    status: OrchestratorStatus = OrchestratorStatus.IDLE
    current_node: str = ""
    meeting_results: List[MeetingResult] = field(default_factory=list)
    started_at: float = 0
    finished_at: float = 0
    error: str = ""

    def to_dict(self) -> Dict:
        return {
            "run_id": self.run_id,
            "workflow_id": self.workflow_id,
            "workflow_name": self.workflow_name,
            "status": self.status.value,
            "current_node": self.current_node,
            "meeting_results": [
                {
                    "meeting_id": mr.meeting_id,
                    "title": mr.title,
                    "node_type": mr.node_type,
                    "status": mr.status,
                    "turns": [asdict(t) for t in mr.turns],
                    "consensus": mr.consensus,
                    "started_at": mr.started_at,
                    "finished_at": mr.finished_at,
                }
                for mr in self.meeting_results
            ],
            "started_at": self.started_at,
            "finished_at": self.finished_at,
            "error": self.error,
            "elapsed_seconds": round(
                (self.finished_at or time.time()) - self.started_at, 1
            ) if self.started_at else 0,
        }
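
# Serialized shape of to_dict() (a sketch; values are illustrative):
#
#   {
#     "run_id": "run-1a2b3c4d",
#     "workflow_id": "...",
#     "status": "running",
#     "current_node": "...",
#     "meeting_results": [...],
#     "elapsed_seconds": 12.3,
#   }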


# Per-role system prompts for the different meeting topics.
DINNER_ROLE_PROMPTS = {
    "chef": (
        "You are the team's foodie/chef. You know every cuisine inside out and "
        "care about flavor, ingredient freshness, and cooking technique. "
        "State your view in short, vivid language (2-3 sentences)."
    ),
    "health": (
        "You are the team's health advisor/nutritionist. You care about balanced "
        "diets, calorie control, and food safety. "
        "State your view in short, professional language (2-3 sentences)."
    ),
    "budget": (
        "You are the team's budget keeper/finance person. You care about value "
        "for money, per-person cost, and discounts. "
        "State your view in short, pragmatic language (2-3 sentences)."
    ),
    "pm": (
        "You are the product manager, responsible for weighing everyone's input "
        "and making the final call. "
        "State your view in short, decisive language (2-3 sentences)."
    ),
    "architect": (
        "You are the system architect: rigorous, good at analysis and comparison. "
        "State your view in short, well-structured language (2-3 sentences)."
    ),
    "developer": (
        "You are the developer: pragmatic and efficient, favoring simple, direct "
        "solutions. State your view in short, direct language (2-3 sentences)."
    ),
    "qa": (
        "You are the QA engineer, focused on details and risk control. "
        "State your view in short, cautious language (2-3 sentences)."
    ),
    "reviewer": (
        "You are the code-review expert, good at spotting problems and suggesting "
        "improvements. State your view in short, insightful language (2-3 sentences)."
    ),
}


def _get_role_prompt(role: str) -> str:
    """Return the system prompt for a role, falling back to a generic one."""
    return DINNER_ROLE_PROMPTS.get(
        role,
        f"You are the team's {role}. State your view briefly (2-3 sentences)."
    )
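
# Example (the role name is illustrative; any key missing from the table
# falls back to the generic prompt):
#
#   >>> _get_role_prompt("designer")
#   "You are the team's designer. State your view briefly (2-3 sentences)."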


class WorkflowOrchestrator:
    """
    Workflow orchestrator.

    Drives every node in a workflow automatically:
    - meeting nodes: each agent speaks in turn via the LLM, then a consensus is produced
    - execution nodes: execution is simulated and the node is marked complete
    """

    def __init__(self):
        self._runs: Dict[str, OrchestrationRun] = {}
        self._running_task: Optional[asyncio.Task] = None

    def get_run(self, run_id: str) -> Optional[OrchestrationRun]:
        return self._runs.get(run_id)

    def list_runs(self) -> List[Dict]:
        return [r.to_dict() for r in self._runs.values()]

    async def start_workflow(
        self,
        workflow_path: str,
        agent_overrides: Dict[str, str] = None,
    ) -> OrchestrationRun:
        """
        Start automatic orchestration of a workflow.

        Args:
            workflow_path: YAML file name (e.g. dinner-decision.yaml)
            agent_overrides: optional agent_id → model override map
        Returns:
            an OrchestrationRun object (execution continues in the background)
        """
        engine = get_workflow_engine()
        workflow = await engine.load_workflow(workflow_path)

        run = OrchestrationRun(
            run_id=f"run-{uuid.uuid4().hex[:8]}",
            workflow_id=workflow.workflow_id,
            workflow_name=workflow.name,
            status=OrchestratorStatus.RUNNING,
            started_at=time.time(),
        )
        self._runs[run.run_id] = run

        # Kick off orchestration in the background.
        self._running_task = asyncio.create_task(
            self._run_workflow(run, workflow_path, agent_overrides or {})
        )

        return run

    async def _run_workflow(
        self,
        run: OrchestrationRun,
        workflow_path: str,
        agent_overrides: Dict[str, str],
    ):
        """Execute the whole workflow in the background."""
        engine = get_workflow_engine()

        try:
            while True:
                next_node = await engine.get_next_meeting(run.workflow_id)
                if next_node is None:
                    break

                run.current_node = next_node.meeting_id
                logger.info(f"[{run.run_id}] starting node: {next_node.title} ({next_node.node_type})")

                if next_node.node_type == "meeting":
                    result = await self._run_meeting_node(
                        run, next_node, agent_overrides
                    )
                else:
                    result = await self._run_execution_node(
                        run, next_node, agent_overrides
                    )

                run.meeting_results.append(result)

                # Mark the node complete.
                await engine.complete_meeting(run.workflow_id, next_node.meeting_id)
                logger.info(f"[{run.run_id}] node complete: {next_node.title}")

            run.status = OrchestratorStatus.COMPLETED
            run.finished_at = time.time()
            run.current_node = ""
            logger.info(f"[{run.run_id}] workflow complete in {run.finished_at - run.started_at:.1f}s")

        except Exception as e:
            run.status = OrchestratorStatus.FAILED
            run.error = str(e)
            run.finished_at = time.time()
            logger.error(f"[{run.run_id}] workflow failed: {e}", exc_info=True)

    async def _run_meeting_node(
        self,
        run: OrchestrationRun,
        node: WorkflowMeeting,
        agent_overrides: Dict[str, str],
    ) -> MeetingResult:
        """Run one meeting node: each agent speaks in turn, then a consensus is produced."""
        registry = get_agent_registry()
        recorder = get_meeting_recorder()
        heartbeat = get_heartbeat_service()

        result = MeetingResult(
            meeting_id=node.meeting_id,
            title=node.title,
            node_type="meeting",
            started_at=time.time(),
            status="in_progress",
        )

        # Create the meeting record.
        steps = ["proposal", "discussion", "consensus"]
        await recorder.create_meeting(
            meeting_id=node.meeting_id,
            title=node.title,
            attendees=node.attendees,
            steps=steps,
        )
        await recorder.update_progress(node.meeting_id, "proposal")

        # Collect discussion context from earlier nodes.
        previous_context = self._build_previous_context(run)

        # Let each agent speak in turn.
        for agent_id in node.attendees:
            agent = await registry.get_agent(agent_id)
            if not agent:
                logger.warning(f"Agent {agent_id} not registered, skipping")
                continue

            # Update the heartbeat.
            await heartbeat.update_heartbeat(
                agent_id, "working", f"In meeting: {node.title}", 50
            )

            # Generate the utterance with the LLM.
            model = agent_overrides.get(agent_id, agent.model)
            turn = await self._generate_agent_turn(
                agent, model, node.title, previous_context, result.turns
            )
            result.turns.append(turn)

            # Write it into the meeting record.
            await recorder.add_discussion(
                meeting_id=node.meeting_id,
                agent_id=agent.agent_id,
                agent_name=agent.name,
                content=turn.content,
                step="discussion",
            )

            # Reset the heartbeat.
            await heartbeat.update_heartbeat(agent_id, "idle", "", 100)

        # Produce the consensus.
        await recorder.update_progress(node.meeting_id, "consensus")
        consensus = await self._generate_consensus(node, result.turns)
        result.consensus = consensus

        # Close the meeting.
        await recorder.end_meeting(node.meeting_id, consensus=consensus)

        result.status = "completed"
        result.finished_at = time.time()
        return result

    async def _run_execution_node(
        self,
        run: OrchestrationRun,
        node: WorkflowMeeting,
        agent_overrides: Dict[str, str],
    ) -> MeetingResult:
        """Run one execution node: simulate task execution."""
        registry = get_agent_registry()
        heartbeat = get_heartbeat_service()
        engine = get_workflow_engine()

        result = MeetingResult(
            meeting_id=node.meeting_id,
            title=node.title,
            node_type="execution",
            started_at=time.time(),
            status="in_progress",
        )

        # Use the most recent meeting consensus as the execution instruction.
        last_consensus = ""
        for mr in reversed(run.meeting_results):
            if mr.consensus:
                last_consensus = mr.consensus
                break

        for agent_id in node.attendees:
            agent = await registry.get_agent(agent_id)
            if not agent:
                continue

            await heartbeat.update_heartbeat(
                agent_id, "working", f"Executing: {node.title}", 30
            )

            model = agent_overrides.get(agent_id, agent.model)
            turn = await self._generate_execution_turn(
                agent, model, node.title, last_consensus
            )
            result.turns.append(turn)

            # Check in with the workflow engine.
            await engine.join_execution_node(
                run.workflow_id, node.meeting_id, agent_id
            )

            await heartbeat.update_heartbeat(agent_id, "idle", "", 100)

        result.status = "completed"
        result.finished_at = time.time()
        return result

    async def _generate_agent_turn(
        self,
        agent: AgentInfo,
        model: str,
        meeting_title: str,
        previous_context: str,
        existing_turns: List[AgentTurn],
    ) -> AgentTurn:
        """
        Generate one agent's meeting utterance via the LLM.

        Falls back across available CLIs and the LLM API (see _call_llm).
        """
        role_prompt = _get_role_prompt(agent.role)

        # Summarize what the other agents have said so far.
        others_said = ""
        for t in existing_turns:
            content = t.content[:150] if len(t.content) > 150 else t.content
            others_said += f" {t.agent_name}: {content}\n"

        # Fold everything into one coherent instruction.
        prompt = (
            f"Scenario: the team is discussing \"{meeting_title}\".\n"
            f"Your role: {agent.name}. {role_prompt}\n"
        )
        if previous_context:
            prompt += f"\nConclusions from the previous round: {previous_context}\n"
        if others_said:
            prompt += f"\nStatements so far:\n{others_said}\n"
        prompt += (
            f"\nSpeak as {agent.name} and give 2-3 sentences of concrete advice. "
            f"Do not introduce yourself, do not ask questions, do not use tools; "
            f"state your view directly."
        )

        messages = [
            LLMMessage(role="user", content=prompt),
        ]

        start = time.time()
        content, tokens, is_mock = await self._call_llm(model, messages)
        latency = time.time() - start

        return AgentTurn(
            agent_id=agent.agent_id,
            agent_name=agent.name,
            role=agent.role,
            model=model,
            content=content,
            timestamp=time.time(),
            latency=round(latency, 2),
            tokens_used=tokens,
            is_mock=is_mock,
        )

    async def _generate_execution_turn(
        self,
        agent: AgentInfo,
        model: str,
        task_title: str,
        consensus: str,
    ) -> AgentTurn:
        """Generate an agent's execution report for an execution node."""
        prompt = (
            f"You are {agent.name}. After discussion the team decided:\n{consensus}\n\n"
            f"You now need to carry out the task \"{task_title}\". "
            f"Report your result and next steps directly, in 2-3 sentences."
        )
        messages = [
            LLMMessage(role="user", content=prompt),
        ]

        start = time.time()
        content, tokens, is_mock = await self._call_llm(model, messages)
        latency = time.time() - start

        return AgentTurn(
            agent_id=agent.agent_id,
            agent_name=agent.name,
            role=agent.role,
            model=model,
            content=content,
            timestamp=time.time(),
            latency=round(latency, 2),
            tokens_used=tokens,
            is_mock=is_mock,
        )

    async def _generate_consensus(
        self,
        node: WorkflowMeeting,
        turns: List[AgentTurn],
    ) -> str:
        """Summarize all turns into a meeting consensus; the kimi CLI works best here."""
        discussion_summary = ""
        for t in turns:
            # Cap each turn at 200 characters to keep the prompt short.
            content = t.content[:200] if len(t.content) > 200 else t.content
            discussion_summary += f" {t.agent_name}: {content}\n"

        prompt = (
            f"Summarize the following discussion about \"{node.title}\" into a "
            f"3-5 sentence consensus.\n\n"
            f"Discussion log:\n{discussion_summary}\n"
            f"Requirements: output the summary directly, including the final "
            f"decision and the action plan. Do not ask questions."
        )

        messages = [
            LLMMessage(role="user", content=prompt),
        ]

        # Prefer the kimi CLI for summaries (it follows instructions best).
        content, _, _ = await self._call_llm("kimi", messages)
        return content

    async def _call_llm(
        self, model: str, messages: List[LLMMessage]
    ) -> tuple:
        """
        Generate content with AI, in priority order: CLI → LLM API → raise.

        Returns: (content, tokens_used, is_mock)
        """
        # Separate the system prompt from the user prompt.
        system_prompt = ""
        user_prompt = ""
        for m in messages:
            if m.role == "system":
                system_prompt += m.content + "\n"
            else:
                user_prompt += m.content + "\n"
        user_prompt = user_prompt.strip()

        # 1. Try a CLI first.
        cli_name = resolve_cli(model) if model else None
        if cli_name:
            logger.info(f"Using CLI [{cli_name}] (model={model})")
            result = await invoke_cli(
                cli_name, user_prompt, timeout=120,
                system_prompt=system_prompt.strip(),
            )
            if result.success:
                return result.content, 0, False
            else:
                logger.warning(f"CLI [{cli_name}] failed: {result.error}, trying alternatives")

        # 2. If the model names no CLI, or the CLI failed, try any available CLI.
        available = detect_available_clis()
        if available:
            fallback_cli = list(available.keys())[0]
            logger.info(f"Using fallback CLI [{fallback_cli}]")
            result = await invoke_cli(
                fallback_cli, user_prompt, timeout=120,
                system_prompt=system_prompt.strip(),
            )
            if result.success:
                return result.content, 0, False
            else:
                logger.warning(f"Fallback CLI [{fallback_cli}] failed too: {result.error}")

        # 3. Try the LLM API.
        try:
            llm = get_llm_service()
            providers = llm.get_available_providers()
            real_providers = [p for p in providers if p != "ollama"]
            if real_providers:
                response = await llm.route_task(
                    task=messages[-1].content,
                    messages=messages,
                    preferred_model=model if model else None,
                )
                return response.content, response.tokens_used, False

        except Exception as e:
            logger.warning(f"LLM API unavailable as well: {e}")

        raise RuntimeError(
            f"No AI provider available. CLIs={list(available.keys()) if available else 'none'}, "
            f"and no LLM API key is configured. Install a CLI tool or configure an API key."
        )

    def _build_previous_context(self, run: OrchestrationRun) -> str:
        """Build context from the meetings completed so far."""
        parts = []
        for mr in run.meeting_results:
            if mr.consensus:
                parts.append(f"[{mr.title}] consensus: {mr.consensus}")
        return "\n".join(parts)


# Singleton
_orchestrator: Optional[WorkflowOrchestrator] = None


def get_workflow_orchestrator() -> WorkflowOrchestrator:
    """Return the orchestrator singleton."""
    global _orchestrator
    if _orchestrator is None:
        _orchestrator = WorkflowOrchestrator()
    return _orchestrator
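
# Usage sketch (run inside an async context; dinner-decision.yaml is the
# sample workflow mentioned above, and the "chef" agent id is illustrative):
#
#   orchestrator = get_workflow_orchestrator()
#   run = await orchestrator.start_workflow(
#       "dinner-decision.yaml",
#       agent_overrides={"chef": "kimi"},   # force one agent onto the kimi CLI
#   )
#   # ... later, poll the background run:
#   print(orchestrator.get_run(run.run_id).to_dict()["status"])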