From edbddf855d74c197894fdcf714cf2a07f203bfad Mon Sep 17 00:00:00 2001 From: Claude Code Date: Tue, 3 Feb 2026 19:20:02 +0800 Subject: [PATCH] =?UTF-8?q?feat:=20AI=E8=81=8A=E5=A4=A9=E5=AE=A4=E5=A4=9AA?= =?UTF-8?q?gent=E5=8D=8F=E4=BD=9C=E8=AE=A8=E8=AE=BA=E5=B9=B3=E5=8F=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 实现Agent管理,支持AI辅助生成系统提示词 - 支持多个AI提供商(OpenRouter、智谱、MiniMax等) - 实现聊天室和讨论引擎 - WebSocket实时消息推送 - 前端使用React + Ant Design - 后端使用FastAPI + MongoDB Co-Authored-By: Claude Opus 4.5 --- .claude/settings.local.json | 26 + .env.example | 15 + .gitignore | 47 + CLAUDE.md | 99 + README.md | 207 ++ backend/.env.example | 16 + backend/Dockerfile | 25 + backend/adapters/__init__.py | 58 + backend/adapters/base_adapter.py | 166 + backend/adapters/deepseek_adapter.py | 197 ++ backend/adapters/gemini_adapter.py | 250 ++ backend/adapters/kimi_adapter.py | 197 ++ backend/adapters/llmstudio_adapter.py | 253 ++ backend/adapters/minimax_adapter.py | 251 ++ backend/adapters/ollama_adapter.py | 241 ++ backend/adapters/openrouter_adapter.py | 201 ++ backend/adapters/zhipu_adapter.py | 197 ++ backend/config.py | 50 + backend/database/__init__.py | 10 + backend/database/connection.py | 94 + backend/main.py | 73 + backend/models/__init__.py | 25 + backend/models/agent.py | 168 + backend/models/agent_memory.py | 123 + backend/models/ai_provider.py | 149 + backend/models/chatroom.py | 131 + backend/models/discussion_result.py | 126 + backend/models/message.py | 123 + backend/requirements.txt | 42 + backend/routers/__init__.py | 14 + backend/routers/agents.py | 314 ++ backend/routers/chatrooms.py | 387 +++ backend/routers/discussions.py | 136 + backend/routers/providers.py | 241 ++ backend/services/__init__.py | 22 + backend/services/agent_service.py | 438 +++ backend/services/ai_provider_service.py | 364 +++ backend/services/chatroom_service.py | 357 +++ backend/services/consensus_manager.py | 227 ++ backend/services/discussion_engine.py 
| 589 ++++ backend/services/mcp_service.py | 252 ++ backend/services/memory_service.py | 416 +++ backend/services/message_router.py | 335 ++ backend/utils/__init__.py | 13 + backend/utils/encryption.py | 97 + backend/utils/proxy_handler.py | 135 + backend/utils/rate_limiter.py | 233 ++ docker-compose.yml | 70 + frontend/Dockerfile | 31 + frontend/index.html | 13 + frontend/nginx.conf | 46 + frontend/package-lock.json | 3192 +++++++++++++++++++ frontend/package.json | 28 + frontend/src/App.tsx | 39 + frontend/src/components/AgentCard.tsx | 122 + frontend/src/components/MessageBubble.tsx | 97 + frontend/src/components/ProviderCard.tsx | 139 + frontend/src/components/Sidebar.tsx | 92 + frontend/src/components/TypingIndicator.tsx | 74 + frontend/src/index.css | 73 + frontend/src/main.tsx | 17 + frontend/src/pages/AgentManagement.tsx | 471 +++ frontend/src/pages/ChatRoom.tsx | 419 +++ frontend/src/pages/Dashboard.tsx | 199 ++ frontend/src/pages/DiscussionHistory.tsx | 245 ++ frontend/src/pages/ProviderConfig.tsx | 260 ++ frontend/src/services/api.ts | 195 ++ frontend/src/services/websocket.ts | 173 + frontend/src/stores/agentStore.ts | 103 + frontend/src/stores/chatroomStore.ts | 183 ++ frontend/src/stores/providerStore.ts | 62 + frontend/src/types/index.ts | 150 + frontend/tsconfig.json | 25 + frontend/tsconfig.node.json | 10 + frontend/vite.config.ts | 19 + 问题.md | 4 + 76 files changed, 14681 insertions(+) create mode 100644 .claude/settings.local.json create mode 100644 .env.example create mode 100644 .gitignore create mode 100644 CLAUDE.md create mode 100644 README.md create mode 100644 backend/.env.example create mode 100644 backend/Dockerfile create mode 100644 backend/adapters/__init__.py create mode 100644 backend/adapters/base_adapter.py create mode 100644 backend/adapters/deepseek_adapter.py create mode 100644 backend/adapters/gemini_adapter.py create mode 100644 backend/adapters/kimi_adapter.py create mode 100644 backend/adapters/llmstudio_adapter.py create 
mode 100644 backend/adapters/minimax_adapter.py create mode 100644 backend/adapters/ollama_adapter.py create mode 100644 backend/adapters/openrouter_adapter.py create mode 100644 backend/adapters/zhipu_adapter.py create mode 100644 backend/config.py create mode 100644 backend/database/__init__.py create mode 100644 backend/database/connection.py create mode 100644 backend/main.py create mode 100644 backend/models/__init__.py create mode 100644 backend/models/agent.py create mode 100644 backend/models/agent_memory.py create mode 100644 backend/models/ai_provider.py create mode 100644 backend/models/chatroom.py create mode 100644 backend/models/discussion_result.py create mode 100644 backend/models/message.py create mode 100644 backend/requirements.txt create mode 100644 backend/routers/__init__.py create mode 100644 backend/routers/agents.py create mode 100644 backend/routers/chatrooms.py create mode 100644 backend/routers/discussions.py create mode 100644 backend/routers/providers.py create mode 100644 backend/services/__init__.py create mode 100644 backend/services/agent_service.py create mode 100644 backend/services/ai_provider_service.py create mode 100644 backend/services/chatroom_service.py create mode 100644 backend/services/consensus_manager.py create mode 100644 backend/services/discussion_engine.py create mode 100644 backend/services/mcp_service.py create mode 100644 backend/services/memory_service.py create mode 100644 backend/services/message_router.py create mode 100644 backend/utils/__init__.py create mode 100644 backend/utils/encryption.py create mode 100644 backend/utils/proxy_handler.py create mode 100644 backend/utils/rate_limiter.py create mode 100644 docker-compose.yml create mode 100644 frontend/Dockerfile create mode 100644 frontend/index.html create mode 100644 frontend/nginx.conf create mode 100644 frontend/package-lock.json create mode 100644 frontend/package.json create mode 100644 frontend/src/App.tsx create mode 100644 
frontend/src/components/AgentCard.tsx create mode 100644 frontend/src/components/MessageBubble.tsx create mode 100644 frontend/src/components/ProviderCard.tsx create mode 100644 frontend/src/components/Sidebar.tsx create mode 100644 frontend/src/components/TypingIndicator.tsx create mode 100644 frontend/src/index.css create mode 100644 frontend/src/main.tsx create mode 100644 frontend/src/pages/AgentManagement.tsx create mode 100644 frontend/src/pages/ChatRoom.tsx create mode 100644 frontend/src/pages/Dashboard.tsx create mode 100644 frontend/src/pages/DiscussionHistory.tsx create mode 100644 frontend/src/pages/ProviderConfig.tsx create mode 100644 frontend/src/services/api.ts create mode 100644 frontend/src/services/websocket.ts create mode 100644 frontend/src/stores/agentStore.ts create mode 100644 frontend/src/stores/chatroomStore.ts create mode 100644 frontend/src/stores/providerStore.ts create mode 100644 frontend/src/types/index.ts create mode 100644 frontend/tsconfig.json create mode 100644 frontend/tsconfig.node.json create mode 100644 frontend/vite.config.ts create mode 100644 问题.md diff --git a/.claude/settings.local.json b/.claude/settings.local.json new file mode 100644 index 0000000..8680ad3 --- /dev/null +++ b/.claude/settings.local.json @@ -0,0 +1,26 @@ +{ + "permissions": { + "allow": [ + "Bash(powershell:*)", + "Bash(where:*)", + "Bash(docker:*)", + "Bash(docker-compose:*)", + "Bash(npm --version)", + "Bash(mongosh:*)", + "Bash(mongo:*)", + "mcp__web-search-prime__webSearchPrime", + "Bash(python:*)", + "Bash(set HTTP_PROXY=)", + "Bash(set HTTPS_PROXY=)", + "Bash(export:*)", + "Bash(export HTTPS_PROXY=\"\")", + "Bash(curl:*)", + "Bash(taskkill:*)", + "Bash(cmd.exe /c \"tasklist | findstr /i \"\"28876 53736 52620 24000 52468 18560 37980 51192 python\"\"\")", + "Bash(cmd.exe:*)", + "Bash(git init:*)", + "Bash(git config:*)", + "Bash(git add:*)" + ] + } +} diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..a27461a --- 
/dev/null +++ b/.env.example @@ -0,0 +1,15 @@ +# AI聊天室环境变量配置 + +# MongoDB配置 +MONGO_PASSWORD=chatroom123 + +# 安全配置 +SECRET_KEY=your-secret-key-change-in-production +ENCRYPTION_KEY=your-32-byte-encryption-key-here + +# 开发模式 +DEBUG=false + +# 可选:默认代理配置 +DEFAULT_HTTP_PROXY= +DEFAULT_HTTPS_PROXY= diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9d5b0bb --- /dev/null +++ b/.gitignore @@ -0,0 +1,47 @@ + +# Python +__pycache__/ +*.py[cod] +*.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST +*.pyc +.env +.venv +venv/ +ENV/ +env/ + +# Node +node_modules/ +npm-debug.log +yarn-error.log +dist/ +.DS_Store + +# IDE +.idea/ +.vscode/ +*.swp +*.swo +*~ + +# Logs +*.log + diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..1093cf3 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,99 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Project Overview + +AI Chat Room (AI聊天室) is a multi-agent collaborative discussion platform. Users configure AI providers, create agents with different roles, and let them discuss in chat rooms to reach consensus. + +**Stack:** FastAPI (Python 3.11+) backend + React 18 (TypeScript) frontend + MongoDB database. Real-time communication via WebSockets. 
+ +## Development Commands + +### Docker (Recommended) +```bash +# Start all services +docker-compose up -d + +# Rebuild after changes +docker-compose up -d --build + +# View logs +docker-compose logs -f backend +docker-compose logs -f frontend +``` + +### Backend (Local) +```bash +cd backend +python -m venv venv +venv\Scripts\activate # Windows: activate, Linux/Mac: source venv/bin/activate +pip install -r requirements.txt +python main.py +``` + +Backend runs on http://localhost:8000 - API docs at http://localhost:8000/docs + +### Frontend (Local) +```bash +cd frontend +npm install +npm run dev # Development server (Vite) +npm run build # Production build (tsc && vite build) +``` + +Frontend runs on http://localhost:3000 + +## Architecture + +### Backend Structure +- **[adapters/](backend/adapters/)** - AI provider integrations using Adapter pattern + - `base_adapter.py` - Abstract base class with `ChatMessage`, `AdapterResponse`, `BaseAdapter` + - Each adapter implements `chat()`, `chat_stream()`, `test_connection()` + - Supported: OpenRouter, Zhipu (智谱), MiniMax, Kimi, DeepSeek, Gemini, Ollama, LLM Studio +- **[models/](backend/models/)** - Beanie ODM documents for MongoDB +- **[services/](backend/services/)** - Business logic layer + - `discussion_engine.py` - Core multi-agent discussion orchestration + - `consensus_manager.py` - Moderator agent evaluates if consensus reached + - `message_router.py` - WebSocket message routing +- **[routers/](backend/routers/)** - FastAPI route handlers (providers, agents, chatrooms, discussions) +- **[utils/](backend/utils/)** - encryption.py (API keys), proxy_handler.py, rate_limiter.py + +### Frontend Structure +- **[src/stores/](frontend/src/stores/)** - Zustand state management +- **[src/services/](frontend/src/services/)** - API client and WebSocket client +- **[src/pages/](frontend/src/pages/)** - Dashboard, ProviderConfig, AgentManagement, ChatRoom, DiscussionHistory +- **[src/components/](frontend/src/components/)** - 
Reusable UI components using Ant Design + +### Key Data Flow +1. User creates agents (role + system prompt) and assigns AI providers +2. Chat room created with selected agents + optional moderator +3. Discussion started: `discussion_engine.py` orchestrates turn-based agent interactions +4. Each round: agents receive context and decide whether to speak (role relevance) +5. Moderator agent periodically checks for consensus via `consensus_manager.py` +6. WebSocket streams messages in real-time to frontend + +### Adding New AI Providers +1. Create new adapter in `backend/adapters/` inheriting from `BaseAdapter` +2. Implement async methods: `chat()`, `chat_stream()`, `test_connection()` +3. Register in `backend/adapters/__init__.py` + +## Configuration + +Environment variables in `.env`: +- `MONGODB_URL` - MongoDB connection string +- `MONGODB_DB` - Database name (default: ai_chatroom) +- `SECRET_KEY` - JWT signing key +- `ENCRYPTION_KEY` - 32-byte key for API key encryption +- `DEFAULT_HTTP_PROXY` / `DEFAULT_HTTPS_PROXY` - Proxy for overseas APIs + +Backend config in [backend/config.py](backend/config.py) - Pydantic Settings with defaults. + +## Important Notes + +- All async/await - Python async functions throughout backend +- API keys encrypted at rest using `cryptography` Fernet +- WebSocket heartbeat every 30s (`WS_HEARTBEAT_INTERVAL`) +- CORS origins configured in settings for local development +- MongoDB indexes created automatically by Beanie on startup +- Chinese language UI (README and comments in Chinese) diff --git a/README.md b/README.md new file mode 100644 index 0000000..528cc50 --- /dev/null +++ b/README.md @@ -0,0 +1,207 @@ +# AI聊天室 - 多Agent协作讨论平台 + +一个基于多AI Agent协作的聊天室系统,支持配置多种AI接口,创建具有不同角色的Agent,并让它们在聊天室中针对特定目标进行自由讨论,最终达成共识。 + +## 功能特性 + +### 1. AI接口管理 +- 支持多种AI提供商:MiniMax、智谱、OpenRouter、Kimi、DeepSeek、Gemini +- 支持本地模型:Ollama、LLM Studio +- 可配置代理(支持海外API访问) +- API密钥加密存储 +- 速率限制和连接测试 + +### 2. 
Agent管理 +- 创建自定义Agent角色 +- 配置系统提示词和行为参数 +- 支持记忆功能、MCP工具、多模态输入 +- 预设模板快速创建(产品经理、开发工程师、设计师等) +- Agent测试对话功能 + +### 3. 聊天室 +- 创建讨论聊天室,添加多个Agent参与 +- 设置讨论目标,启动自由讨论 +- Agent根据角色自主判断是否发言 +- 实时WebSocket消息推送 +- 支持暂停、恢复、停止讨论 + +### 4. 共识判断 +- 主持人Agent判断讨论是否达成共识 +- 自动生成讨论摘要和行动项 +- 记录未解决的问题 +- 讨论历史回放 + +## 技术架构 + +``` +├── backend/ # FastAPI后端 +│ ├── adapters/ # AI接口适配器 +│ ├── models/ # 数据模型 +│ ├── services/ # 业务逻辑 +│ ├── routers/ # API路由 +│ └── utils/ # 工具函数 +├── frontend/ # React前端 +│ ├── src/ +│ │ ├── components/ # UI组件 +│ │ ├── pages/ # 页面 +│ │ ├── stores/ # 状态管理 +│ │ └── services/ # API服务 +└── docker-compose.yml # Docker编排 +``` + +## 快速开始 + +### 使用Docker(推荐) + +1. 克隆项目并进入目录: +```bash +cd AIChatRoom +``` + +2. 复制环境变量配置: +```bash +cp .env.example .env +# 编辑 .env 文件,修改密钥等配置 +``` + +3. 启动服务: +```bash +docker-compose up -d +``` + +4. 访问应用: +- 前端:http://localhost:3000 +- 后端API:http://localhost:8000 +- API文档:http://localhost:8000/docs + +### 本地开发 + +#### 后端 + +1. 创建Python虚拟环境: +```bash +cd backend +python -m venv venv +source venv/bin/activate # Windows: venv\Scripts\activate +``` + +2. 安装依赖: +```bash +pip install -r requirements.txt +``` + +3. 配置环境变量: +```bash +cp .env.example .env +# 编辑 .env 文件 +``` + +4. 启动MongoDB: +```bash +# 确保MongoDB已安装并运行 +mongod +``` + +5. 启动后端: +```bash +python main.py +``` + +#### 前端 + +1. 安装依赖: +```bash +cd frontend +npm install +``` + +2. 启动开发服务器: +```bash +npm run dev +``` + +## 使用指南 + +### 1. 配置AI接口 + +1. 进入"AI接口配置"页面 +2. 点击"添加接口" +3. 选择提供商类型,填写API密钥和模型名称 +4. 如需代理,开启"使用代理"并填写代理地址 +5. 点击测试验证连接 + +### 2. 创建Agent + +1. 进入"Agent管理"页面 +2. 使用预设模板或点击"创建Agent" +3. 填写名称、角色、系统提示词 +4. 选择关联的AI接口 +5. 配置行为参数(温度、发言倾向等) + +### 3. 开始讨论 + +1. 进入"聊天室"页面 +2. 创建新聊天室,选择参与的Agent +3. 可指定主持人Agent用于共识判断 +4. 点击"开始讨论",输入讨论目标 +5. 
观察Agent自由讨论,等待达成共识 + +## API文档 + +启动后端后访问 http://localhost:8000/docs 查看完整的Swagger API文档。 + +### 主要端点 + +- `POST /api/providers` - 创建AI接口配置 +- `POST /api/agents` - 创建Agent +- `POST /api/chatrooms` - 创建聊天室 +- `POST /api/chatrooms/{id}/start` - 启动讨论 +- `WS /api/chatrooms/ws/{id}` - 聊天室WebSocket + +## 配置说明 + +### 环境变量 + +| 变量 | 说明 | 默认值 | +|------|------|--------| +| MONGODB_URL | MongoDB连接地址 | mongodb://localhost:27017 | +| MONGODB_DB | 数据库名称 | ai_chatroom | +| SECRET_KEY | 应用密钥 | - | +| ENCRYPTION_KEY | 加密密钥(32字节) | - | +| DEFAULT_HTTP_PROXY | 默认HTTP代理 | - | +| DEFAULT_HTTPS_PROXY | 默认HTTPS代理 | - | + +### 代理配置 + +对于需要代理访问的AI接口(如Gemini、OpenRouter),在接口配置中开启代理: + +```json +{ + "use_proxy": true, + "proxy_config": { + "http_proxy": "http://127.0.0.1:7890", + "https_proxy": "http://127.0.0.1:7890" + } +} +``` + +## 扩展开发 + +### 添加新的AI提供商 + +1. 在 `backend/adapters/` 创建新的适配器文件 +2. 继承 `BaseAdapter` 类 +3. 实现 `chat`、`chat_stream`、`test_connection` 方法 +4. 在 `adapters/__init__.py` 注册新适配器 + +### 自定义共识判断逻辑 + +修改 `backend/services/consensus_manager.py` 中的 `CONSENSUS_PROMPT` 模板,调整共识判断的标准。 + +## 许可证 + +MIT License + +## 贡献 + +欢迎提交Issue和Pull Request! 
diff --git a/backend/.env.example b/backend/.env.example new file mode 100644 index 0000000..1f6eae0 --- /dev/null +++ b/backend/.env.example @@ -0,0 +1,16 @@ +# MongoDB配置 +MONGODB_URL=mongodb://localhost:27017 +MONGODB_DB=ai_chatroom + +# 服务配置 +HOST=0.0.0.0 +PORT=8000 +DEBUG=true + +# 安全配置(生产环境请修改) +SECRET_KEY=your-secret-key-change-in-production +ENCRYPTION_KEY=your-encryption-key-32-bytes-long + +# 代理配置(可选) +# DEFAULT_HTTP_PROXY=http://127.0.0.1:7890 +# DEFAULT_HTTPS_PROXY=http://127.0.0.1:7890 diff --git a/backend/Dockerfile b/backend/Dockerfile new file mode 100644 index 0000000..48d1afb --- /dev/null +++ b/backend/Dockerfile @@ -0,0 +1,25 @@ +# AI聊天室后端 Dockerfile +FROM python:3.11-slim + +WORKDIR /app + +# 安装系统依赖 +RUN apt-get update && apt-get install -y \ + build-essential \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# 复制依赖文件 +COPY requirements.txt . + +# 安装Python依赖 +RUN pip install --no-cache-dir -r requirements.txt + +# 复制应用代码 +COPY . . + +# 暴露端口 +EXPOSE 8000 + +# 启动命令 +CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/backend/adapters/__init__.py b/backend/adapters/__init__.py new file mode 100644 index 0000000..c801a60 --- /dev/null +++ b/backend/adapters/__init__.py @@ -0,0 +1,58 @@ +""" +AI接口适配器模块 +提供统一的AI调用接口 +""" +from .base_adapter import BaseAdapter, AdapterResponse, ChatMessage +from .minimax_adapter import MiniMaxAdapter +from .zhipu_adapter import ZhipuAdapter +from .openrouter_adapter import OpenRouterAdapter +from .kimi_adapter import KimiAdapter +from .deepseek_adapter import DeepSeekAdapter +from .gemini_adapter import GeminiAdapter +from .ollama_adapter import OllamaAdapter +from .llmstudio_adapter import LLMStudioAdapter + +__all__ = [ + "BaseAdapter", + "AdapterResponse", + "ChatMessage", + "MiniMaxAdapter", + "ZhipuAdapter", + "OpenRouterAdapter", + "KimiAdapter", + "DeepSeekAdapter", + "GeminiAdapter", + "OllamaAdapter", + "LLMStudioAdapter", +] + +# 适配器注册表 +ADAPTER_REGISTRY = { + "minimax": 
MiniMaxAdapter, + "zhipu": ZhipuAdapter, + "openrouter": OpenRouterAdapter, + "kimi": KimiAdapter, + "deepseek": DeepSeekAdapter, + "gemini": GeminiAdapter, + "ollama": OllamaAdapter, + "llmstudio": LLMStudioAdapter, +} + + +def get_adapter(provider_type: str) -> type: + """ + 根据提供商类型获取对应的适配器类 + + Args: + provider_type: 提供商类型标识 + + Returns: + 适配器类 + + Raises: + ValueError: 未知的提供商类型 + """ + adapter_class = ADAPTER_REGISTRY.get(provider_type.lower()) + if not adapter_class: + raise ValueError(f"未知的AI提供商类型: {provider_type}") + return adapter_class diff --git a/backend/adapters/base_adapter.py b/backend/adapters/base_adapter.py new file mode 100644 index 0000000..24e6761 --- /dev/null +++ b/backend/adapters/base_adapter.py @@ -0,0 +1,166 @@ +""" +AI适配器基类 +定义统一的AI调用接口 +""" +from abc import ABC, abstractmethod +from dataclasses import dataclass, field +from typing import List, Dict, Any, Optional, AsyncGenerator +from datetime import datetime + + +@dataclass +class ChatMessage: + """聊天消息""" + role: str # system, user, assistant + content: str # 消息内容 + name: Optional[str] = None # 发送者名称(可选) + + def to_dict(self) -> Dict[str, Any]: + """转换为字典""" + d = {"role": self.role, "content": self.content} + if self.name: + d["name"] = self.name + return d + + +@dataclass +class AdapterResponse: + """适配器响应""" + success: bool # 是否成功 + content: str = "" # 响应内容 + error: Optional[str] = None # 错误信息 + + # 统计信息 + prompt_tokens: int = 0 # 输入token数 + completion_tokens: int = 0 # 输出token数 + total_tokens: int = 0 # 总token数 + + # 元数据 + model: str = "" # 使用的模型 + finish_reason: str = "" # 结束原因 + latency_ms: float = 0.0 # 延迟(毫秒) + + # 工具调用结果 + tool_calls: List[Dict[str, Any]] = field(default_factory=list) + + def __post_init__(self): + if self.total_tokens == 0: + self.total_tokens = self.prompt_tokens + self.completion_tokens + + +class BaseAdapter(ABC): + """ + AI适配器基类 + 所有AI提供商适配器必须继承此类 + """ + + def __init__( + self, + api_key: str, + base_url: str, + model: str, + use_proxy: bool = False, + 
proxy_config: Optional[Dict[str, Any]] = None, + timeout: int = 60, + **kwargs + ): + """ + 初始化适配器 + + Args: + api_key: API密钥 + base_url: API基础URL + model: 模型名称 + use_proxy: 是否使用代理 + proxy_config: 代理配置 + timeout: 超时时间(秒) + **kwargs: 额外参数 + """ + self.api_key = api_key + self.base_url = base_url + self.model = model + self.use_proxy = use_proxy + self.proxy_config = proxy_config or {} + self.timeout = timeout + self.extra_params = kwargs + + @abstractmethod + async def chat( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AdapterResponse: + """ + 发送聊天请求 + + Args: + messages: 消息列表 + temperature: 温度参数 + max_tokens: 最大token数 + **kwargs: 额外参数 + + Returns: + 适配器响应 + """ + pass + + @abstractmethod + async def chat_stream( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AsyncGenerator[str, None]: + """ + 发送流式聊天请求 + + Args: + messages: 消息列表 + temperature: 温度参数 + max_tokens: 最大token数 + **kwargs: 额外参数 + + Yields: + 响应内容片段 + """ + pass + + @abstractmethod + async def test_connection(self) -> Dict[str, Any]: + """ + 测试API连接 + + Returns: + 测试结果字典,包含 success, message, latency_ms + """ + pass + + def _build_messages( + self, + messages: List[ChatMessage] + ) -> List[Dict[str, Any]]: + """ + 构建消息列表 + + Args: + messages: ChatMessage列表 + + Returns: + 字典格式的消息列表 + """ + return [msg.to_dict() for msg in messages] + + def _calculate_latency(self, start_time: datetime) -> float: + """ + 计算延迟 + + Args: + start_time: 开始时间 + + Returns: + 延迟毫秒数 + """ + return (datetime.utcnow() - start_time).total_seconds() * 1000 diff --git a/backend/adapters/deepseek_adapter.py b/backend/adapters/deepseek_adapter.py new file mode 100644 index 0000000..a002395 --- /dev/null +++ b/backend/adapters/deepseek_adapter.py @@ -0,0 +1,197 @@ +""" +DeepSeek适配器 +支持DeepSeek大模型API +""" +import json +from datetime import datetime +from typing import List, Dict, Any, Optional, AsyncGenerator +from 
loguru import logger + +from .base_adapter import BaseAdapter, ChatMessage, AdapterResponse +from utils.proxy_handler import get_http_client + + +class DeepSeekAdapter(BaseAdapter): + """ + DeepSeek API适配器 + 兼容OpenAI API格式 + """ + + DEFAULT_BASE_URL = "https://api.deepseek.com/v1" + + def __init__( + self, + api_key: str, + base_url: str = "", + model: str = "deepseek-chat", + use_proxy: bool = False, + proxy_config: Optional[Dict[str, Any]] = None, + timeout: int = 60, + **kwargs + ): + super().__init__( + api_key=api_key, + base_url=base_url or self.DEFAULT_BASE_URL, + model=model, + use_proxy=use_proxy, + proxy_config=proxy_config, + timeout=timeout, + **kwargs + ) + + async def chat( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AdapterResponse: + """发送聊天请求""" + start_time = datetime.utcnow() + + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json" + } + + payload = { + "model": self.model, + "messages": self._build_messages(messages), + "temperature": temperature, + "max_tokens": max_tokens, + **kwargs + } + + response = await client.post( + f"{self.base_url}/chat/completions", + headers=headers, + json=payload + ) + + if response.status_code != 200: + error_text = response.text + logger.error(f"DeepSeek API错误: {response.status_code} - {error_text}") + return AdapterResponse( + success=False, + error=f"API错误: {response.status_code} - {error_text}", + latency_ms=self._calculate_latency(start_time) + ) + + data = response.json() + choice = data.get("choices", [{}])[0] + message = choice.get("message", {}) + usage = data.get("usage", {}) + + return AdapterResponse( + success=True, + content=message.get("content", ""), + model=data.get("model", self.model), + finish_reason=choice.get("finish_reason", ""), + 
prompt_tokens=usage.get("prompt_tokens", 0), + completion_tokens=usage.get("completion_tokens", 0), + total_tokens=usage.get("total_tokens", 0), + latency_ms=self._calculate_latency(start_time), + tool_calls=message.get("tool_calls", []) + ) + + except Exception as e: + logger.error(f"DeepSeek请求异常: {e}") + return AdapterResponse( + success=False, + error=str(e), + latency_ms=self._calculate_latency(start_time) + ) + + async def chat_stream( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AsyncGenerator[str, None]: + """发送流式聊天请求""" + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json" + } + + payload = { + "model": self.model, + "messages": self._build_messages(messages), + "temperature": temperature, + "max_tokens": max_tokens, + "stream": True, + **kwargs + } + + async with client.stream( + "POST", + f"{self.base_url}/chat/completions", + headers=headers, + json=payload + ) as response: + async for line in response.aiter_lines(): + if line.startswith("data: "): + data_str = line[6:] + if data_str.strip() == "[DONE]": + break + try: + data = json.loads(data_str) + delta = data.get("choices", [{}])[0].get("delta", {}) + content = delta.get("content", "") + if content: + yield content + except json.JSONDecodeError: + continue + + except Exception as e: + logger.error(f"DeepSeek流式请求异常: {e}") + yield f"[错误: {str(e)}]" + + async def test_connection(self) -> Dict[str, Any]: + """测试API连接""" + start_time = datetime.utcnow() + + try: + test_messages = [ + ChatMessage(role="user", content="你好,请回复'OK'") + ] + + response = await self.chat( + messages=test_messages, + temperature=0, + max_tokens=10 + ) + + if response.success: + return { + "success": True, + "message": "连接成功", + "model": response.model, + "latency_ms": response.latency_ms + 
} + else: + return { + "success": False, + "message": response.error, + "latency_ms": response.latency_ms + } + + except Exception as e: + return { + "success": False, + "message": str(e), + "latency_ms": self._calculate_latency(start_time) + } diff --git a/backend/adapters/gemini_adapter.py b/backend/adapters/gemini_adapter.py new file mode 100644 index 0000000..cf2e3a4 --- /dev/null +++ b/backend/adapters/gemini_adapter.py @@ -0,0 +1,250 @@ +""" +Gemini适配器 +支持Google Gemini大模型API +""" +import json +from datetime import datetime +from typing import List, Dict, Any, Optional, AsyncGenerator +from loguru import logger + +from .base_adapter import BaseAdapter, ChatMessage, AdapterResponse +from utils.proxy_handler import get_http_client + + +class GeminiAdapter(BaseAdapter): + """ + Google Gemini API适配器 + 使用Gemini的原生API格式 + """ + + DEFAULT_BASE_URL = "https://generativelanguage.googleapis.com/v1beta" + + def __init__( + self, + api_key: str, + base_url: str = "", + model: str = "gemini-1.5-pro", + use_proxy: bool = True, # Gemini通常需要代理 + proxy_config: Optional[Dict[str, Any]] = None, + timeout: int = 60, + **kwargs + ): + super().__init__( + api_key=api_key, + base_url=base_url or self.DEFAULT_BASE_URL, + model=model, + use_proxy=use_proxy, + proxy_config=proxy_config, + timeout=timeout, + **kwargs + ) + + def _convert_messages_to_gemini( + self, + messages: List[ChatMessage] + ) -> tuple[str, List[Dict[str, Any]]]: + """ + 将消息转换为Gemini格式 + + Args: + messages: 标准消息列表 + + Returns: + (system_instruction, contents) + """ + system_instruction = "" + contents = [] + + for msg in messages: + if msg.role == "system": + system_instruction += msg.content + "\n" + else: + role = "user" if msg.role == "user" else "model" + contents.append({ + "role": role, + "parts": [{"text": msg.content}] + }) + + return system_instruction.strip(), contents + + async def chat( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> 
AdapterResponse: + """发送聊天请求""" + start_time = datetime.utcnow() + + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + system_instruction, contents = self._convert_messages_to_gemini(messages) + + payload = { + "contents": contents, + "generationConfig": { + "temperature": temperature, + "maxOutputTokens": max_tokens, + "topP": kwargs.get("top_p", 0.95), + "topK": kwargs.get("top_k", 40) + } + } + + # 添加系统指令 + if system_instruction: + payload["systemInstruction"] = { + "parts": [{"text": system_instruction}] + } + + url = f"{self.base_url}/models/{self.model}:generateContent?key={self.api_key}" + + response = await client.post( + url, + json=payload + ) + + if response.status_code != 200: + error_text = response.text + logger.error(f"Gemini API错误: {response.status_code} - {error_text}") + return AdapterResponse( + success=False, + error=f"API错误: {response.status_code} - {error_text}", + latency_ms=self._calculate_latency(start_time) + ) + + data = response.json() + + # 检查是否有候选回复 + candidates = data.get("candidates", []) + if not candidates: + return AdapterResponse( + success=False, + error="没有生成回复", + latency_ms=self._calculate_latency(start_time) + ) + + candidate = candidates[0] + content = candidate.get("content", {}) + parts = content.get("parts", []) + text = "".join(part.get("text", "") for part in parts) + + # 获取token使用情况 + usage = data.get("usageMetadata", {}) + + return AdapterResponse( + success=True, + content=text, + model=self.model, + finish_reason=candidate.get("finishReason", ""), + prompt_tokens=usage.get("promptTokenCount", 0), + completion_tokens=usage.get("candidatesTokenCount", 0), + total_tokens=usage.get("totalTokenCount", 0), + latency_ms=self._calculate_latency(start_time) + ) + + except Exception as e: + logger.error(f"Gemini请求异常: {e}") + return AdapterResponse( + success=False, + error=str(e), + latency_ms=self._calculate_latency(start_time) + ) + + 
async def chat_stream( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AsyncGenerator[str, None]: + """发送流式聊天请求""" + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + system_instruction, contents = self._convert_messages_to_gemini(messages) + + payload = { + "contents": contents, + "generationConfig": { + "temperature": temperature, + "maxOutputTokens": max_tokens, + "topP": kwargs.get("top_p", 0.95), + "topK": kwargs.get("top_k", 40) + } + } + + if system_instruction: + payload["systemInstruction"] = { + "parts": [{"text": system_instruction}] + } + + url = f"{self.base_url}/models/{self.model}:streamGenerateContent?key={self.api_key}&alt=sse" + + async with client.stream( + "POST", + url, + json=payload + ) as response: + async for line in response.aiter_lines(): + if line.startswith("data: "): + data_str = line[6:] + try: + data = json.loads(data_str) + candidates = data.get("candidates", []) + if candidates: + content = candidates[0].get("content", {}) + parts = content.get("parts", []) + for part in parts: + text = part.get("text", "") + if text: + yield text + except json.JSONDecodeError: + continue + + except Exception as e: + logger.error(f"Gemini流式请求异常: {e}") + yield f"[错误: {str(e)}]" + + async def test_connection(self) -> Dict[str, Any]: + """测试API连接""" + start_time = datetime.utcnow() + + try: + test_messages = [ + ChatMessage(role="user", content="Hello, respond with 'OK'") + ] + + response = await self.chat( + messages=test_messages, + temperature=0, + max_tokens=10 + ) + + if response.success: + return { + "success": True, + "message": "连接成功", + "model": response.model, + "latency_ms": response.latency_ms + } + else: + return { + "success": False, + "message": response.error, + "latency_ms": response.latency_ms + } + + except Exception as e: + return { + "success": False, + "message": str(e), + 
"latency_ms": self._calculate_latency(start_time) + } diff --git a/backend/adapters/kimi_adapter.py b/backend/adapters/kimi_adapter.py new file mode 100644 index 0000000..1c1b86f --- /dev/null +++ b/backend/adapters/kimi_adapter.py @@ -0,0 +1,197 @@ +""" +Kimi适配器 +支持月之暗面Kimi大模型API +""" +import json +from datetime import datetime +from typing import List, Dict, Any, Optional, AsyncGenerator +from loguru import logger + +from .base_adapter import BaseAdapter, ChatMessage, AdapterResponse +from utils.proxy_handler import get_http_client + + +class KimiAdapter(BaseAdapter): + """ + Kimi API适配器 + 兼容OpenAI API格式 + """ + + DEFAULT_BASE_URL = "https://api.moonshot.cn/v1" + + def __init__( + self, + api_key: str, + base_url: str = "", + model: str = "moonshot-v1-8k", + use_proxy: bool = False, + proxy_config: Optional[Dict[str, Any]] = None, + timeout: int = 60, + **kwargs + ): + super().__init__( + api_key=api_key, + base_url=base_url or self.DEFAULT_BASE_URL, + model=model, + use_proxy=use_proxy, + proxy_config=proxy_config, + timeout=timeout, + **kwargs + ) + + async def chat( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AdapterResponse: + """发送聊天请求""" + start_time = datetime.utcnow() + + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json" + } + + payload = { + "model": self.model, + "messages": self._build_messages(messages), + "temperature": temperature, + "max_tokens": max_tokens, + **kwargs + } + + response = await client.post( + f"{self.base_url}/chat/completions", + headers=headers, + json=payload + ) + + if response.status_code != 200: + error_text = response.text + logger.error(f"Kimi API错误: {response.status_code} - {error_text}") + return AdapterResponse( + success=False, + error=f"API错误: {response.status_code} - 
{error_text}", + latency_ms=self._calculate_latency(start_time) + ) + + data = response.json() + choice = data.get("choices", [{}])[0] + message = choice.get("message", {}) + usage = data.get("usage", {}) + + return AdapterResponse( + success=True, + content=message.get("content", ""), + model=data.get("model", self.model), + finish_reason=choice.get("finish_reason", ""), + prompt_tokens=usage.get("prompt_tokens", 0), + completion_tokens=usage.get("completion_tokens", 0), + total_tokens=usage.get("total_tokens", 0), + latency_ms=self._calculate_latency(start_time), + tool_calls=message.get("tool_calls", []) + ) + + except Exception as e: + logger.error(f"Kimi请求异常: {e}") + return AdapterResponse( + success=False, + error=str(e), + latency_ms=self._calculate_latency(start_time) + ) + + async def chat_stream( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AsyncGenerator[str, None]: + """发送流式聊天请求""" + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json" + } + + payload = { + "model": self.model, + "messages": self._build_messages(messages), + "temperature": temperature, + "max_tokens": max_tokens, + "stream": True, + **kwargs + } + + async with client.stream( + "POST", + f"{self.base_url}/chat/completions", + headers=headers, + json=payload + ) as response: + async for line in response.aiter_lines(): + if line.startswith("data: "): + data_str = line[6:] + if data_str.strip() == "[DONE]": + break + try: + data = json.loads(data_str) + delta = data.get("choices", [{}])[0].get("delta", {}) + content = delta.get("content", "") + if content: + yield content + except json.JSONDecodeError: + continue + + except Exception as e: + logger.error(f"Kimi流式请求异常: {e}") + yield f"[错误: {str(e)}]" + + async def test_connection(self) -> Dict[str, 
Any]: + """测试API连接""" + start_time = datetime.utcnow() + + try: + test_messages = [ + ChatMessage(role="user", content="你好,请回复'OK'") + ] + + response = await self.chat( + messages=test_messages, + temperature=0, + max_tokens=10 + ) + + if response.success: + return { + "success": True, + "message": "连接成功", + "model": response.model, + "latency_ms": response.latency_ms + } + else: + return { + "success": False, + "message": response.error, + "latency_ms": response.latency_ms + } + + except Exception as e: + return { + "success": False, + "message": str(e), + "latency_ms": self._calculate_latency(start_time) + } diff --git a/backend/adapters/llmstudio_adapter.py b/backend/adapters/llmstudio_adapter.py new file mode 100644 index 0000000..55c4bd5 --- /dev/null +++ b/backend/adapters/llmstudio_adapter.py @@ -0,0 +1,253 @@ +""" +LLM Studio适配器 +支持本地LLM Studio服务 +""" +import json +from datetime import datetime +from typing import List, Dict, Any, Optional, AsyncGenerator +from loguru import logger + +from .base_adapter import BaseAdapter, ChatMessage, AdapterResponse +from utils.proxy_handler import get_http_client + + +class LLMStudioAdapter(BaseAdapter): + """ + LLM Studio API适配器 + 兼容OpenAI API格式的本地服务 + """ + + DEFAULT_BASE_URL = "http://localhost:1234/v1" + + def __init__( + self, + api_key: str = "lm-studio", # LLM Studio使用固定key + base_url: str = "", + model: str = "local-model", + use_proxy: bool = False, # 本地服务不需要代理 + proxy_config: Optional[Dict[str, Any]] = None, + timeout: int = 120, # 本地模型可能需要更长时间 + **kwargs + ): + super().__init__( + api_key=api_key, + base_url=base_url or self.DEFAULT_BASE_URL, + model=model, + use_proxy=use_proxy, + proxy_config=proxy_config, + timeout=timeout, + **kwargs + ) + + async def chat( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AdapterResponse: + """发送聊天请求""" + start_time = datetime.utcnow() + + try: + async with get_http_client( + use_proxy=self.use_proxy, + 
proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json" + } + + payload = { + "model": self.model, + "messages": self._build_messages(messages), + "temperature": temperature, + "max_tokens": max_tokens, + **kwargs + } + + response = await client.post( + f"{self.base_url}/chat/completions", + headers=headers, + json=payload + ) + + if response.status_code != 200: + error_text = response.text + logger.error(f"LLM Studio API错误: {response.status_code} - {error_text}") + return AdapterResponse( + success=False, + error=f"API错误: {response.status_code} - {error_text}", + latency_ms=self._calculate_latency(start_time) + ) + + data = response.json() + choice = data.get("choices", [{}])[0] + message = choice.get("message", {}) + usage = data.get("usage", {}) + + return AdapterResponse( + success=True, + content=message.get("content", ""), + model=data.get("model", self.model), + finish_reason=choice.get("finish_reason", ""), + prompt_tokens=usage.get("prompt_tokens", 0), + completion_tokens=usage.get("completion_tokens", 0), + total_tokens=usage.get("total_tokens", 0), + latency_ms=self._calculate_latency(start_time) + ) + + except Exception as e: + logger.error(f"LLM Studio请求异常: {e}") + return AdapterResponse( + success=False, + error=str(e), + latency_ms=self._calculate_latency(start_time) + ) + + async def chat_stream( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AsyncGenerator[str, None]: + """发送流式聊天请求""" + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json" + } + + payload = { + "model": self.model, + "messages": self._build_messages(messages), + "temperature": temperature, + "max_tokens": max_tokens, + "stream": True, + 
**kwargs + } + + async with client.stream( + "POST", + f"{self.base_url}/chat/completions", + headers=headers, + json=payload + ) as response: + async for line in response.aiter_lines(): + if line.startswith("data: "): + data_str = line[6:] + if data_str.strip() == "[DONE]": + break + try: + data = json.loads(data_str) + delta = data.get("choices", [{}])[0].get("delta", {}) + content = delta.get("content", "") + if content: + yield content + except json.JSONDecodeError: + continue + + except Exception as e: + logger.error(f"LLM Studio流式请求异常: {e}") + yield f"[错误: {str(e)}]" + + async def test_connection(self) -> Dict[str, Any]: + """测试API连接""" + start_time = datetime.utcnow() + + try: + # 首先检查服务是否在运行 + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=10 + ) as client: + # 获取模型列表 + response = await client.get( + f"{self.base_url}/models", + headers={"Authorization": f"Bearer {self.api_key}"} + ) + + if response.status_code != 200: + return { + "success": False, + "message": "LLM Studio服务未运行或不可访问", + "latency_ms": self._calculate_latency(start_time) + } + + data = response.json() + models = [m.get("id", "") for m in data.get("data", [])] + + if not models: + return { + "success": False, + "message": "LLM Studio中没有加载的模型", + "latency_ms": self._calculate_latency(start_time) + } + + # 发送测试消息 + test_messages = [ + ChatMessage(role="user", content="Hello, respond with 'OK'") + ] + + response = await self.chat( + messages=test_messages, + temperature=0, + max_tokens=10 + ) + + if response.success: + return { + "success": True, + "message": "连接成功", + "model": response.model, + "latency_ms": response.latency_ms + } + else: + return { + "success": False, + "message": response.error, + "latency_ms": response.latency_ms + } + + except Exception as e: + return { + "success": False, + "message": str(e), + "latency_ms": self._calculate_latency(start_time) + } + + async def list_models(self) -> List[Dict[str, Any]]: + """ + 列出LLM 
Studio中加载的模型 + + Returns: + 模型信息列表 + """ + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=10 + ) as client: + response = await client.get( + f"{self.base_url}/models", + headers={"Authorization": f"Bearer {self.api_key}"} + ) + + if response.status_code == 200: + data = response.json() + return data.get("data", []) + + except Exception as e: + logger.error(f"获取LLM Studio模型列表失败: {e}") + + return [] diff --git a/backend/adapters/minimax_adapter.py b/backend/adapters/minimax_adapter.py new file mode 100644 index 0000000..b1dc83c --- /dev/null +++ b/backend/adapters/minimax_adapter.py @@ -0,0 +1,251 @@ +""" +MiniMax适配器 +支持MiniMax大模型API +""" +import json +from datetime import datetime +from typing import List, Dict, Any, Optional, AsyncGenerator +from loguru import logger + +from .base_adapter import BaseAdapter, ChatMessage, AdapterResponse +from utils.proxy_handler import get_http_client + + +class MiniMaxAdapter(BaseAdapter): + """ + MiniMax API适配器 + 支持abab系列模型 + """ + + DEFAULT_BASE_URL = "https://api.minimax.chat/v1" + + def __init__( + self, + api_key: str, + base_url: str = "", + model: str = "abab6.5-chat", + use_proxy: bool = False, + proxy_config: Optional[Dict[str, Any]] = None, + timeout: int = 60, + **kwargs + ): + super().__init__( + api_key=api_key, + base_url=base_url or self.DEFAULT_BASE_URL, + model=model, + use_proxy=use_proxy, + proxy_config=proxy_config, + timeout=timeout, + **kwargs + ) + # MiniMax需要group_id + self.group_id = kwargs.get("group_id", "") + + async def chat( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AdapterResponse: + """发送聊天请求""" + start_time = datetime.utcnow() + + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json" + } + + # 
MiniMax使用特殊的消息格式 + minimax_messages = [] + bot_setting = [] + + for msg in messages: + if msg.role == "system": + bot_setting.append({ + "bot_name": "assistant", + "content": msg.content + }) + else: + minimax_messages.append({ + "sender_type": "USER" if msg.role == "user" else "BOT", + "sender_name": msg.name or ("用户" if msg.role == "user" else "assistant"), + "text": msg.content + }) + + payload = { + "model": self.model, + "messages": minimax_messages, + "bot_setting": bot_setting if bot_setting else [{"bot_name": "assistant", "content": "你是一个有帮助的助手"}], + "temperature": temperature, + "tokens_to_generate": max_tokens, + "mask_sensitive_info": False, + **kwargs + } + + url = f"{self.base_url}/text/chatcompletion_v2" + if self.group_id: + url = f"{url}?GroupId={self.group_id}" + + response = await client.post( + url, + headers=headers, + json=payload + ) + + if response.status_code != 200: + error_text = response.text + logger.error(f"MiniMax API错误: {response.status_code} - {error_text}") + return AdapterResponse( + success=False, + error=f"API错误: {response.status_code} - {error_text}", + latency_ms=self._calculate_latency(start_time) + ) + + data = response.json() + + # 检查API返回的错误 + if data.get("base_resp", {}).get("status_code", 0) != 0: + error_msg = data.get("base_resp", {}).get("status_msg", "未知错误") + return AdapterResponse( + success=False, + error=f"API错误: {error_msg}", + latency_ms=self._calculate_latency(start_time) + ) + + reply = data.get("reply", "") + usage = data.get("usage", {}) + + return AdapterResponse( + success=True, + content=reply, + model=self.model, + finish_reason=data.get("output_sensitive", False) and "content_filter" or "stop", + prompt_tokens=usage.get("prompt_tokens", 0), + completion_tokens=usage.get("completion_tokens", 0), + total_tokens=usage.get("total_tokens", 0), + latency_ms=self._calculate_latency(start_time) + ) + + except Exception as e: + logger.error(f"MiniMax请求异常: {e}") + return AdapterResponse( + success=False, + 
error=str(e), + latency_ms=self._calculate_latency(start_time) + ) + + async def chat_stream( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AsyncGenerator[str, None]: + """发送流式聊天请求""" + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json" + } + + minimax_messages = [] + bot_setting = [] + + for msg in messages: + if msg.role == "system": + bot_setting.append({ + "bot_name": "assistant", + "content": msg.content + }) + else: + minimax_messages.append({ + "sender_type": "USER" if msg.role == "user" else "BOT", + "sender_name": msg.name or ("用户" if msg.role == "user" else "assistant"), + "text": msg.content + }) + + payload = { + "model": self.model, + "messages": minimax_messages, + "bot_setting": bot_setting if bot_setting else [{"bot_name": "assistant", "content": "你是一个有帮助的助手"}], + "temperature": temperature, + "tokens_to_generate": max_tokens, + "stream": True, + **kwargs + } + + url = f"{self.base_url}/text/chatcompletion_v2" + if self.group_id: + url = f"{url}?GroupId={self.group_id}" + + async with client.stream( + "POST", + url, + headers=headers, + json=payload + ) as response: + async for line in response.aiter_lines(): + if line.startswith("data: "): + data_str = line[6:] + if data_str.strip() == "[DONE]": + break + try: + data = json.loads(data_str) + delta = data.get("choices", [{}])[0].get("delta", {}) + content = delta.get("content", "") + if content: + yield content + except json.JSONDecodeError: + continue + + except Exception as e: + logger.error(f"MiniMax流式请求异常: {e}") + yield f"[错误: {str(e)}]" + + async def test_connection(self) -> Dict[str, Any]: + """测试API连接""" + start_time = datetime.utcnow() + + try: + test_messages = [ + ChatMessage(role="user", content="你好,请回复'OK'") + ] + + response = await self.chat( + 
messages=test_messages, + temperature=0, + max_tokens=10 + ) + + if response.success: + return { + "success": True, + "message": "连接成功", + "model": response.model, + "latency_ms": response.latency_ms + } + else: + return { + "success": False, + "message": response.error, + "latency_ms": response.latency_ms + } + + except Exception as e: + return { + "success": False, + "message": str(e), + "latency_ms": self._calculate_latency(start_time) + } diff --git a/backend/adapters/ollama_adapter.py b/backend/adapters/ollama_adapter.py new file mode 100644 index 0000000..38dbcc5 --- /dev/null +++ b/backend/adapters/ollama_adapter.py @@ -0,0 +1,241 @@ +""" +Ollama适配器 +支持本地Ollama服务 +""" +import json +from datetime import datetime +from typing import List, Dict, Any, Optional, AsyncGenerator +from loguru import logger + +from .base_adapter import BaseAdapter, ChatMessage, AdapterResponse +from utils.proxy_handler import get_http_client + + +class OllamaAdapter(BaseAdapter): + """ + Ollama API适配器 + 用于连接本地Ollama服务 + """ + + DEFAULT_BASE_URL = "http://localhost:11434" + + def __init__( + self, + api_key: str = "", # Ollama通常不需要API密钥 + base_url: str = "", + model: str = "llama2", + use_proxy: bool = False, # 本地服务通常不需要代理 + proxy_config: Optional[Dict[str, Any]] = None, + timeout: int = 120, # 本地模型可能需要更长时间 + **kwargs + ): + super().__init__( + api_key=api_key, + base_url=base_url or self.DEFAULT_BASE_URL, + model=model, + use_proxy=use_proxy, + proxy_config=proxy_config, + timeout=timeout, + **kwargs + ) + + async def chat( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AdapterResponse: + """发送聊天请求""" + start_time = datetime.utcnow() + + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + payload = { + "model": self.model, + "messages": self._build_messages(messages), + "options": { + "temperature": temperature, + "num_predict": 
max_tokens, + }, + "stream": False + } + + response = await client.post( + f"{self.base_url}/api/chat", + json=payload + ) + + if response.status_code != 200: + error_text = response.text + logger.error(f"Ollama API错误: {response.status_code} - {error_text}") + return AdapterResponse( + success=False, + error=f"API错误: {response.status_code} - {error_text}", + latency_ms=self._calculate_latency(start_time) + ) + + data = response.json() + message = data.get("message", {}) + + return AdapterResponse( + success=True, + content=message.get("content", ""), + model=data.get("model", self.model), + finish_reason=data.get("done_reason", "stop"), + prompt_tokens=data.get("prompt_eval_count", 0), + completion_tokens=data.get("eval_count", 0), + total_tokens=( + data.get("prompt_eval_count", 0) + + data.get("eval_count", 0) + ), + latency_ms=self._calculate_latency(start_time) + ) + + except Exception as e: + logger.error(f"Ollama请求异常: {e}") + return AdapterResponse( + success=False, + error=str(e), + latency_ms=self._calculate_latency(start_time) + ) + + async def chat_stream( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AsyncGenerator[str, None]: + """发送流式聊天请求""" + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + payload = { + "model": self.model, + "messages": self._build_messages(messages), + "options": { + "temperature": temperature, + "num_predict": max_tokens, + }, + "stream": True + } + + async with client.stream( + "POST", + f"{self.base_url}/api/chat", + json=payload + ) as response: + async for line in response.aiter_lines(): + if line: + try: + data = json.loads(line) + message = data.get("message", {}) + content = message.get("content", "") + if content: + yield content + + # 检查是否完成 + if data.get("done", False): + break + except json.JSONDecodeError: + continue + + except Exception as e: + 
logger.error(f"Ollama流式请求异常: {e}") + yield f"[错误: {str(e)}]" + + async def test_connection(self) -> Dict[str, Any]: + """测试API连接""" + start_time = datetime.utcnow() + + try: + # 首先检查服务是否在运行 + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=10 + ) as client: + # 检查模型是否存在 + response = await client.get(f"{self.base_url}/api/tags") + + if response.status_code != 200: + return { + "success": False, + "message": "Ollama服务未运行或不可访问", + "latency_ms": self._calculate_latency(start_time) + } + + data = response.json() + models = [m.get("name", "").split(":")[0] for m in data.get("models", [])] + + model_name = self.model.split(":")[0] + if model_name not in models: + return { + "success": False, + "message": f"模型 {self.model} 未安装,可用模型: {', '.join(models)}", + "latency_ms": self._calculate_latency(start_time) + } + + # 发送测试消息 + test_messages = [ + ChatMessage(role="user", content="Hello, respond with 'OK'") + ] + + response = await self.chat( + messages=test_messages, + temperature=0, + max_tokens=10 + ) + + if response.success: + return { + "success": True, + "message": "连接成功", + "model": response.model, + "latency_ms": response.latency_ms + } + else: + return { + "success": False, + "message": response.error, + "latency_ms": response.latency_ms + } + + except Exception as e: + return { + "success": False, + "message": str(e), + "latency_ms": self._calculate_latency(start_time) + } + + async def list_models(self) -> List[str]: + """ + 列出本地可用的模型 + + Returns: + 模型名称列表 + """ + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=10 + ) as client: + response = await client.get(f"{self.base_url}/api/tags") + + if response.status_code == 200: + data = response.json() + return [m.get("name", "") for m in data.get("models", [])] + + except Exception as e: + logger.error(f"获取Ollama模型列表失败: {e}") + + return [] diff --git a/backend/adapters/openrouter_adapter.py 
b/backend/adapters/openrouter_adapter.py new file mode 100644 index 0000000..6d886c2 --- /dev/null +++ b/backend/adapters/openrouter_adapter.py @@ -0,0 +1,201 @@ +""" +OpenRouter适配器 +支持通过OpenRouter访问多种AI模型 +""" +import json +from datetime import datetime +from typing import List, Dict, Any, Optional, AsyncGenerator +from loguru import logger + +from .base_adapter import BaseAdapter, ChatMessage, AdapterResponse +from utils.proxy_handler import get_http_client + + +class OpenRouterAdapter(BaseAdapter): + """ + OpenRouter API适配器 + 兼容OpenAI API格式 + """ + + DEFAULT_BASE_URL = "https://openrouter.ai/api/v1" + + def __init__( + self, + api_key: str, + base_url: str = "", + model: str = "openai/gpt-4-turbo", + use_proxy: bool = False, + proxy_config: Optional[Dict[str, Any]] = None, + timeout: int = 60, + **kwargs + ): + super().__init__( + api_key=api_key, + base_url=base_url or self.DEFAULT_BASE_URL, + model=model, + use_proxy=use_proxy, + proxy_config=proxy_config, + timeout=timeout, + **kwargs + ) + + async def chat( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AdapterResponse: + """发送聊天请求""" + start_time = datetime.utcnow() + + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json", + "HTTP-Referer": kwargs.get("referer", "https://ai-chatroom.local"), + "X-Title": kwargs.get("title", "AI ChatRoom") + } + + payload = { + "model": self.model, + "messages": self._build_messages(messages), + "temperature": temperature, + "max_tokens": max_tokens, + **kwargs + } + + response = await client.post( + f"{self.base_url}/chat/completions", + headers=headers, + json=payload + ) + + if response.status_code != 200: + error_text = response.text + logger.error(f"OpenRouter API错误: {response.status_code} - {error_text}") + return AdapterResponse( 
+ success=False, + error=f"API错误: {response.status_code} - {error_text}", + latency_ms=self._calculate_latency(start_time) + ) + + data = response.json() + choice = data.get("choices", [{}])[0] + message = choice.get("message", {}) + usage = data.get("usage", {}) + + return AdapterResponse( + success=True, + content=message.get("content", ""), + model=data.get("model", self.model), + finish_reason=choice.get("finish_reason", ""), + prompt_tokens=usage.get("prompt_tokens", 0), + completion_tokens=usage.get("completion_tokens", 0), + total_tokens=usage.get("total_tokens", 0), + latency_ms=self._calculate_latency(start_time), + tool_calls=message.get("tool_calls", []) + ) + + except Exception as e: + logger.error(f"OpenRouter请求异常: {e}") + return AdapterResponse( + success=False, + error=str(e), + latency_ms=self._calculate_latency(start_time) + ) + + async def chat_stream( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AsyncGenerator[str, None]: + """发送流式聊天请求""" + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json", + "HTTP-Referer": kwargs.get("referer", "https://ai-chatroom.local"), + "X-Title": kwargs.get("title", "AI ChatRoom") + } + + payload = { + "model": self.model, + "messages": self._build_messages(messages), + "temperature": temperature, + "max_tokens": max_tokens, + "stream": True, + **kwargs + } + + async with client.stream( + "POST", + f"{self.base_url}/chat/completions", + headers=headers, + json=payload + ) as response: + async for line in response.aiter_lines(): + if line.startswith("data: "): + data_str = line[6:] + if data_str.strip() == "[DONE]": + break + try: + data = json.loads(data_str) + delta = data.get("choices", [{}])[0].get("delta", {}) + content = delta.get("content", "") + if content: + yield 
content + except json.JSONDecodeError: + continue + + except Exception as e: + logger.error(f"OpenRouter流式请求异常: {e}") + yield f"[错误: {str(e)}]" + + async def test_connection(self) -> Dict[str, Any]: + """测试API连接""" + start_time = datetime.utcnow() + + try: + test_messages = [ + ChatMessage(role="user", content="Hello, respond with 'OK'") + ] + + response = await self.chat( + messages=test_messages, + temperature=0, + max_tokens=10 + ) + + if response.success: + return { + "success": True, + "message": "连接成功", + "model": response.model, + "latency_ms": response.latency_ms + } + else: + return { + "success": False, + "message": response.error, + "latency_ms": response.latency_ms + } + + except Exception as e: + return { + "success": False, + "message": str(e), + "latency_ms": self._calculate_latency(start_time) + } diff --git a/backend/adapters/zhipu_adapter.py b/backend/adapters/zhipu_adapter.py new file mode 100644 index 0000000..3763972 --- /dev/null +++ b/backend/adapters/zhipu_adapter.py @@ -0,0 +1,197 @@ +""" +智谱AI适配器 +支持智谱GLM系列模型 +""" +import json +from datetime import datetime +from typing import List, Dict, Any, Optional, AsyncGenerator +from loguru import logger + +from .base_adapter import BaseAdapter, ChatMessage, AdapterResponse +from utils.proxy_handler import get_http_client + + +class ZhipuAdapter(BaseAdapter): + """ + 智谱AI API适配器 + 支持GLM-4、GLM-3等模型 + """ + + DEFAULT_BASE_URL = "https://open.bigmodel.cn/api/paas/v4" + + def __init__( + self, + api_key: str, + base_url: str = "", + model: str = "glm-4", + use_proxy: bool = False, + proxy_config: Optional[Dict[str, Any]] = None, + timeout: int = 60, + **kwargs + ): + super().__init__( + api_key=api_key, + base_url=base_url or self.DEFAULT_BASE_URL, + model=model, + use_proxy=use_proxy, + proxy_config=proxy_config, + timeout=timeout, + **kwargs + ) + + async def chat( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AdapterResponse: + 
"""发送聊天请求""" + start_time = datetime.utcnow() + + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json" + } + + payload = { + "model": self.model, + "messages": self._build_messages(messages), + "temperature": temperature, + "max_tokens": max_tokens, + **kwargs + } + + response = await client.post( + f"{self.base_url}/chat/completions", + headers=headers, + json=payload + ) + + if response.status_code != 200: + error_text = response.text + logger.error(f"智谱API错误: {response.status_code} - {error_text}") + return AdapterResponse( + success=False, + error=f"API错误: {response.status_code} - {error_text}", + latency_ms=self._calculate_latency(start_time) + ) + + data = response.json() + choice = data.get("choices", [{}])[0] + message = choice.get("message", {}) + usage = data.get("usage", {}) + + return AdapterResponse( + success=True, + content=message.get("content", ""), + model=data.get("model", self.model), + finish_reason=choice.get("finish_reason", ""), + prompt_tokens=usage.get("prompt_tokens", 0), + completion_tokens=usage.get("completion_tokens", 0), + total_tokens=usage.get("total_tokens", 0), + latency_ms=self._calculate_latency(start_time), + tool_calls=message.get("tool_calls", []) + ) + + except Exception as e: + logger.error(f"智谱API请求异常: {e}") + return AdapterResponse( + success=False, + error=str(e), + latency_ms=self._calculate_latency(start_time) + ) + + async def chat_stream( + self, + messages: List[ChatMessage], + temperature: float = 0.7, + max_tokens: int = 2000, + **kwargs + ) -> AsyncGenerator[str, None]: + """发送流式聊天请求""" + try: + async with get_http_client( + use_proxy=self.use_proxy, + proxy_config=self.proxy_config, + timeout=self.timeout + ) as client: + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json" + } + + payload = { + 
"model": self.model, + "messages": self._build_messages(messages), + "temperature": temperature, + "max_tokens": max_tokens, + "stream": True, + **kwargs + } + + async with client.stream( + "POST", + f"{self.base_url}/chat/completions", + headers=headers, + json=payload + ) as response: + async for line in response.aiter_lines(): + if line.startswith("data: "): + data_str = line[6:] + if data_str.strip() == "[DONE]": + break + try: + data = json.loads(data_str) + delta = data.get("choices", [{}])[0].get("delta", {}) + content = delta.get("content", "") + if content: + yield content + except json.JSONDecodeError: + continue + + except Exception as e: + logger.error(f"智谱流式请求异常: {e}") + yield f"[错误: {str(e)}]" + + async def test_connection(self) -> Dict[str, Any]: + """测试API连接""" + start_time = datetime.utcnow() + + try: + test_messages = [ + ChatMessage(role="user", content="你好,请回复'OK'") + ] + + response = await self.chat( + messages=test_messages, + temperature=0, + max_tokens=10 + ) + + if response.success: + return { + "success": True, + "message": "连接成功", + "model": response.model, + "latency_ms": response.latency_ms + } + else: + return { + "success": False, + "message": response.error, + "latency_ms": response.latency_ms + } + + except Exception as e: + return { + "success": False, + "message": str(e), + "latency_ms": self._calculate_latency(start_time) + } diff --git a/backend/config.py b/backend/config.py new file mode 100644 index 0000000..09ba582 --- /dev/null +++ b/backend/config.py @@ -0,0 +1,50 @@ +""" +应用配置模块 +从环境变量加载配置项 +""" +from pydantic_settings import BaseSettings +from typing import Optional + + +class Settings(BaseSettings): + """应用配置类""" + + # MongoDB配置 + MONGODB_URL: str = "mongodb://localhost:27017" + MONGODB_DB: str = "ai_chatroom" + + # 服务配置 + HOST: str = "0.0.0.0" + PORT: int = 8000 + DEBUG: bool = True + + # 安全配置 + SECRET_KEY: str = "your-secret-key-change-in-production" + ENCRYPTION_KEY: str = "your-encryption-key-32-bytes-long" + + # 
"""
MongoDB connection management.

Uses the Motor async driver plus the Beanie ODM; exposes module-level
accessors for the shared client/database handles.
"""
from typing import Optional

from beanie import init_beanie
from loguru import logger
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase

from config import settings

# Module-level singletons holding the active client / database handles.
_client: Optional[AsyncIOMotorClient] = None
_database: Optional[AsyncIOMotorDatabase] = None


async def connect_db() -> None:
    """Open the MongoDB connection and register all Beanie document models.

    Raises:
        Exception: re-raises whatever the driver or Beanie raised, after
            logging it, so startup fails loudly.
    """
    global _client, _database

    try:
        _client = AsyncIOMotorClient(settings.MONGODB_URL)
        _database = _client[settings.MONGODB_DB]

        # Imported here (not at module top) so Beanie sees every document
        # model without creating circular imports at load time.
        from models.agent import Agent
        from models.agent_memory import AgentMemory
        from models.ai_provider import AIProvider
        from models.chatroom import ChatRoom
        from models.discussion_result import DiscussionResult
        from models.message import Message

        document_models = [
            AIProvider,
            Agent,
            ChatRoom,
            Message,
            DiscussionResult,
            AgentMemory,
        ]
        await init_beanie(database=_database, document_models=document_models)

        logger.info(f"已连接到MongoDB数据库: {settings.MONGODB_DB}")

    except Exception as e:
        logger.error(f"数据库连接失败: {e}")
        raise


async def close_db() -> None:
    """Close the MongoDB connection if one is open (idempotent)."""
    global _client

    if _client:
        _client.close()
        logger.info("数据库连接已关闭")


def get_database() -> AsyncIOMotorDatabase:
    """Return the active database handle.

    Returns:
        The MongoDB database instance.

    Raises:
        RuntimeError: if connect_db() has not been called yet.
    """
    if _database is None:
        raise RuntimeError("数据库未初始化,请先调用connect_db()")
    return _database


def get_client() -> AsyncIOMotorClient:
    """Return the active MongoDB client.

    Returns:
        The MongoDB client instance.

    Raises:
        RuntimeError: if connect_db() has not been called yet.
    """
    if _client is None:
        raise RuntimeError("数据库客户端未初始化")
    return _client
prefix="/api/chatrooms", tags=["聊天室管理"]) +app.include_router(discussions.router, prefix="/api/discussions", tags=["讨论结果"]) + + +@app.get("/") +async def root(): + """根路径健康检查""" + return {"message": "AI聊天室服务运行中", "version": "1.0.0"} + + +@app.get("/health") +async def health_check(): + """健康检查接口""" + return {"status": "healthy"} + + +if __name__ == "__main__": + uvicorn.run( + "main:app", + host=settings.HOST, + port=settings.PORT, + reload=settings.DEBUG + ) diff --git a/backend/models/__init__.py b/backend/models/__init__.py new file mode 100644 index 0000000..ec7f3dd --- /dev/null +++ b/backend/models/__init__.py @@ -0,0 +1,25 @@ +""" +数据模型模块 +""" +from .ai_provider import AIProvider, ProxyConfig, RateLimit +from .agent import Agent, AgentCapabilities, AgentBehavior +from .chatroom import ChatRoom, ChatRoomConfig +from .message import Message, MessageType +from .discussion_result import DiscussionResult +from .agent_memory import AgentMemory, MemoryType + +__all__ = [ + "AIProvider", + "ProxyConfig", + "RateLimit", + "Agent", + "AgentCapabilities", + "AgentBehavior", + "ChatRoom", + "ChatRoomConfig", + "Message", + "MessageType", + "DiscussionResult", + "AgentMemory", + "MemoryType", +] diff --git a/backend/models/agent.py b/backend/models/agent.py new file mode 100644 index 0000000..f3fcbc8 --- /dev/null +++ b/backend/models/agent.py @@ -0,0 +1,168 @@ +""" +Agent数据模型 +定义AI聊天代理的配置结构 +""" +from datetime import datetime +from typing import Optional, Dict, Any, List +from pydantic import Field +from beanie import Document + + +class AgentCapabilities: + """Agent能力配置""" + memory_enabled: bool = False # 是否启用记忆 + mcp_tools: List[str] = [] # 可用的MCP工具 + skills: List[str] = [] # 可用的技能 + multimodal: bool = False # 是否支持多模态 + + def __init__( + self, + memory_enabled: bool = False, + mcp_tools: Optional[List[str]] = None, + skills: Optional[List[str]] = None, + multimodal: bool = False + ): + self.memory_enabled = memory_enabled + self.mcp_tools = mcp_tools or [] + 
self.skills = skills or [] + self.multimodal = multimodal + + def to_dict(self) -> Dict[str, Any]: + """转换为字典""" + return { + "memory_enabled": self.memory_enabled, + "mcp_tools": self.mcp_tools, + "skills": self.skills, + "multimodal": self.multimodal + } + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "AgentCapabilities": + """从字典创建""" + if not data: + return cls() + return cls( + memory_enabled=data.get("memory_enabled", False), + mcp_tools=data.get("mcp_tools", []), + skills=data.get("skills", []), + multimodal=data.get("multimodal", False) + ) + + +class AgentBehavior: + """Agent行为配置""" + speak_threshold: float = 0.5 # 发言阈值(判断是否需要发言) + max_speak_per_round: int = 2 # 每轮最多发言次数 + speak_style: str = "balanced" # 发言风格: concise, balanced, detailed + + def __init__( + self, + speak_threshold: float = 0.5, + max_speak_per_round: int = 2, + speak_style: str = "balanced" + ): + self.speak_threshold = speak_threshold + self.max_speak_per_round = max_speak_per_round + self.speak_style = speak_style + + def to_dict(self) -> Dict[str, Any]: + """转换为字典""" + return { + "speak_threshold": self.speak_threshold, + "max_speak_per_round": self.max_speak_per_round, + "speak_style": self.speak_style + } + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "AgentBehavior": + """从字典创建""" + if not data: + return cls() + return cls( + speak_threshold=data.get("speak_threshold", 0.5), + max_speak_per_round=data.get("max_speak_per_round", 2), + speak_style=data.get("speak_style", "balanced") + ) + + +class Agent(Document): + """ + Agent文档模型 + 存储AI代理的配置信息 + """ + agent_id: str = Field(..., description="唯一标识") + name: str = Field(..., description="Agent名称") + role: str = Field(..., description="角色定义") + system_prompt: str = Field(..., description="系统提示词") + provider_id: str = Field(..., description="使用的AI接口ID") + + # 模型参数 + temperature: float = Field(default=0.7, ge=0, le=2, description="温度参数") + max_tokens: int = Field(default=2000, gt=0, description="最大token数") + 
+ # 能力配置 + capabilities: Dict[str, Any] = Field( + default_factory=lambda: { + "memory_enabled": False, + "mcp_tools": [], + "skills": [], + "multimodal": False + }, + description="能力配置" + ) + + # 行为配置 + behavior: Dict[str, Any] = Field( + default_factory=lambda: { + "speak_threshold": 0.5, + "max_speak_per_round": 2, + "speak_style": "balanced" + }, + description="行为配置" + ) + + # 外观配置 + avatar: Optional[str] = Field(default=None, description="头像URL") + color: str = Field(default="#1890ff", description="代表颜色") + + # 元数据 + enabled: bool = Field(default=True, description="是否启用") + created_at: datetime = Field(default_factory=datetime.utcnow) + updated_at: datetime = Field(default_factory=datetime.utcnow) + + class Settings: + name = "agents" + + def get_capabilities(self) -> AgentCapabilities: + """获取能力配置对象""" + return AgentCapabilities.from_dict(self.capabilities) + + def get_behavior(self) -> AgentBehavior: + """获取行为配置对象""" + return AgentBehavior.from_dict(self.behavior) + + class Config: + json_schema_extra = { + "example": { + "agent_id": "product-manager", + "name": "产品经理", + "role": "产品规划和需求分析专家", + "system_prompt": "你是一位经验丰富的产品经理,擅长分析用户需求...", + "provider_id": "openrouter-gpt4", + "temperature": 0.7, + "max_tokens": 2000, + "capabilities": { + "memory_enabled": True, + "mcp_tools": ["web_search"], + "skills": [], + "multimodal": False + }, + "behavior": { + "speak_threshold": 0.5, + "max_speak_per_round": 2, + "speak_style": "balanced" + }, + "avatar": "https://example.com/avatar.png", + "color": "#1890ff" + } + } diff --git a/backend/models/agent_memory.py b/backend/models/agent_memory.py new file mode 100644 index 0000000..712836d --- /dev/null +++ b/backend/models/agent_memory.py @@ -0,0 +1,123 @@ +""" +Agent记忆数据模型 +定义Agent的记忆存储结构 +""" +from datetime import datetime +from typing import Optional, List +from enum import Enum +from pydantic import Field +from beanie import Document + + +class MemoryType(str, Enum): + """记忆类型枚举""" + SHORT_TERM = "short_term" # 
class AgentMemory(Document):
    """
    Agent memory document model.

    Stores a single memory item for an agent, with an optional embedding
    vector for similarity retrieval and metadata (importance, access stats,
    expiry) used to rank and prune memories.
    """
    memory_id: str = Field(..., description="唯一标识")
    agent_id: str = Field(..., description="Agent ID")

    # Memory content
    memory_type: str = Field(
        default=MemoryType.SHORT_TERM.value,
        description="记忆类型"
    )
    content: str = Field(..., description="记忆内容")
    summary: str = Field(default="", description="内容摘要")

    # Embedding vector used for similarity search (empty when not computed)
    embedding: List[float] = Field(default_factory=list, description="向量嵌入")

    # Ranking metadata
    importance: float = Field(default=0.5, ge=0, le=1, description="重要性评分")
    access_count: int = Field(default=0, description="访问次数")

    # Provenance / associations
    source_room_id: Optional[str] = Field(default=None, description="来源聊天室ID")
    source_discussion_id: Optional[str] = Field(default=None, description="来源讨论ID")
    related_agents: List[str] = Field(default_factory=list, description="相关Agent列表")
    tags: List[str] = Field(default_factory=list, description="标签")

    # Timestamps — naive UTC via datetime.utcnow.
    # NOTE(review): utcnow is deprecated in Python 3.12; consider migrating
    # to timezone-aware datetimes project-wide — confirm storage format first.
    created_at: datetime = Field(default_factory=datetime.utcnow)
    last_accessed: datetime = Field(default_factory=datetime.utcnow)
    expires_at: Optional[datetime] = Field(default=None, description="过期时间")

    class Settings:
        # Collection name plus secondary indexes for the common query paths
        # (per-agent lookup, type filter, importance/recency ordering).
        name = "agent_memories"
        indexes = [
            [("agent_id", 1)],
            [("memory_type", 1)],
            [("importance", -1)],
            [("last_accessed", -1)],
        ]

    def access(self) -> None:
        """
        Record an access: increment the counter and refresh last_accessed.

        Mutates the in-memory document only; the caller is responsible for
        persisting the change.
        """
        self.access_count += 1
        self.last_accessed = datetime.utcnow()

    def is_expired(self) -> bool:
        """
        Check whether this memory has expired.

        Returns:
            True when expires_at is set and lies in the past; memories with
            no expiry never expire.
        """
        if self.expires_at is None:
            return False
        return datetime.utcnow() > self.expires_at

    def calculate_relevance_score(
        self,
        similarity: float,
        time_decay_factor: float = 0.1
    ) -> float:
        """
        Combined relevance score from similarity, importance and recency.

        Args:
            similarity: Vector similarity in [0, 1].
            time_decay_factor: Decay rate applied per hour since last access.

        Returns:
            Weighted score 0.5*similarity + 0.3*importance + 0.2*time_decay,
            clamped to [0, 1].
        """
        # Hyperbolic decay: 1 at the moment of access, falling toward 0
        # as hours since the last access grow.
        hours_since_access = (datetime.utcnow() - self.last_accessed).total_seconds() / 3600
        time_decay = 1.0 / (1.0 + time_decay_factor * hours_since_access)

        # Fixed-weight linear combination of the three signals.
        score = (
            0.5 * similarity +
            0.3 * self.importance +
            0.2 * time_decay
        )

        return min(1.0, max(0.0, score))

    class Config:
        json_schema_extra = {
            "example": {
                "memory_id": "mem-001",
                "agent_id": "product-manager",
                "memory_type": "long_term",
                "content": "在登录系统设计讨论中,团队决定采用OAuth2.0方案",
                "summary": "登录系统采用OAuth2.0",
                "importance": 0.8,
                "access_count": 5,
                "source_room_id": "product-design-room",
                "tags": ["登录", "OAuth", "认证"]
            }
        }
class RateLimit:
    """Per-provider rate-limit configuration.

    Tracks the allowed request and token budgets per minute; serialized to a
    plain dict inside the AIProvider document.
    """

    requests_per_minute: int = 60       # allowed requests per minute
    tokens_per_minute: int = 100000     # allowed tokens per minute

    def __init__(
        self,
        requests_per_minute: int = 60,
        tokens_per_minute: int = 100000
    ) -> None:
        self.requests_per_minute = requests_per_minute
        self.tokens_per_minute = tokens_per_minute

    def to_dict(self) -> Dict[str, int]:
        """Serialize to a plain dict (inverse of from_dict)."""
        return {
            "requests_per_minute": self.requests_per_minute,
            "tokens_per_minute": self.tokens_per_minute,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, int]) -> "RateLimit":
        """Build from a dict; missing keys or empty input fall back to defaults."""
        src = data or {}
        return cls(
            requests_per_minute=src.get("requests_per_minute", 60),
            tokens_per_minute=src.get("tokens_per_minute", 100000),
        )
class ChatRoomConfig:
    """Discussion-room tuning parameters.

    Covers the fallback termination bound, context window, consensus
    threshold, pacing, and whether users may interrupt the discussion.
    """

    max_rounds: int = 50                  # hard stop if consensus is never reached
    message_history_size: int = 20        # messages of context given to agents
    consensus_threshold: float = 0.8      # confidence needed to declare consensus
    round_interval: float = 1.0           # pause between rounds, seconds
    allow_user_interrupt: bool = True     # whether users may interject

    def __init__(
        self,
        max_rounds: int = 50,
        message_history_size: int = 20,
        consensus_threshold: float = 0.8,
        round_interval: float = 1.0,
        allow_user_interrupt: bool = True
    ) -> None:
        self.max_rounds = max_rounds
        self.message_history_size = message_history_size
        self.consensus_threshold = consensus_threshold
        self.round_interval = round_interval
        self.allow_user_interrupt = allow_user_interrupt

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict (inverse of from_dict)."""
        field_names = (
            "max_rounds",
            "message_history_size",
            "consensus_threshold",
            "round_interval",
            "allow_user_interrupt",
        )
        return {name: getattr(self, name) for name in field_names}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ChatRoomConfig":
        """Build from a dict.

        Unknown keys are ignored; missing keys and empty input fall back to
        the class defaults.
        """
        src = data or {}
        return cls(
            max_rounds=src.get("max_rounds", 50),
            message_history_size=src.get("message_history_size", 20),
            consensus_threshold=src.get("consensus_threshold", 0.8),
            round_interval=src.get("round_interval", 1.0),
            allow_user_interrupt=src.get("allow_user_interrupt", True)
        )
"name": "产品设计讨论室", + "description": "用于讨论新产品功能设计", + "objective": "设计一个用户友好的登录系统", + "agents": ["product-manager", "designer", "developer"], + "moderator_agent_id": "moderator", + "config": { + "max_rounds": 50, + "message_history_size": 20, + "consensus_threshold": 0.8 + }, + "status": "idle", + "current_round": 0 + } + } diff --git a/backend/models/discussion_result.py b/backend/models/discussion_result.py new file mode 100644 index 0000000..640f246 --- /dev/null +++ b/backend/models/discussion_result.py @@ -0,0 +1,126 @@ +""" +讨论结果数据模型 +定义讨论结果的结构 +""" +from datetime import datetime +from typing import Optional, Dict, Any, List +from pydantic import Field +from beanie import Document + + +class DiscussionResult(Document): + """ + 讨论结果文档模型 + 存储讨论的最终结果 + """ + discussion_id: str = Field(..., description="讨论唯一标识") + room_id: str = Field(..., description="聊天室ID") + objective: str = Field(..., description="讨论目标") + + # 共识结果 + consensus_reached: bool = Field(default=False, description="是否达成共识") + confidence: float = Field(default=0.0, ge=0, le=1, description="共识置信度") + + # 结果摘要 + summary: str = Field(default="", description="讨论结果摘要") + action_items: List[str] = Field(default_factory=list, description="行动项列表") + unresolved_issues: List[str] = Field(default_factory=list, description="未解决的问题") + key_decisions: List[str] = Field(default_factory=list, description="关键决策") + + # 统计信息 + total_rounds: int = Field(default=0, description="总轮数") + total_messages: int = Field(default=0, description="总消息数") + participating_agents: List[str] = Field(default_factory=list, description="参与的Agent列表") + agent_contributions: Dict[str, int] = Field( + default_factory=dict, + description="各Agent发言次数统计" + ) + + # 状态 + status: str = Field(default="in_progress", description="状态: in_progress, completed, failed") + end_reason: str = Field(default="", description="结束原因") + + # 时间戳 + created_at: datetime = Field(default_factory=datetime.utcnow) + updated_at: datetime = 
Field(default_factory=datetime.utcnow) + completed_at: Optional[datetime] = Field(default=None, description="完成时间") + + class Settings: + name = "discussions" + indexes = [ + [("room_id", 1)], + [("created_at", -1)], + ] + + def mark_completed( + self, + consensus_reached: bool, + confidence: float, + summary: str, + action_items: List[str] = None, + unresolved_issues: List[str] = None, + end_reason: str = "consensus" + ) -> None: + """ + 标记讨论为已完成 + + Args: + consensus_reached: 是否达成共识 + confidence: 置信度 + summary: 结果摘要 + action_items: 行动项 + unresolved_issues: 未解决问题 + end_reason: 结束原因 + """ + self.consensus_reached = consensus_reached + self.confidence = confidence + self.summary = summary + self.action_items = action_items or [] + self.unresolved_issues = unresolved_issues or [] + self.status = "completed" + self.end_reason = end_reason + self.completed_at = datetime.utcnow() + self.updated_at = datetime.utcnow() + + def update_stats( + self, + total_rounds: int, + total_messages: int, + agent_contributions: Dict[str, int] + ) -> None: + """ + 更新统计信息 + + Args: + total_rounds: 总轮数 + total_messages: 总消息数 + agent_contributions: Agent贡献统计 + """ + self.total_rounds = total_rounds + self.total_messages = total_messages + self.agent_contributions = agent_contributions + self.participating_agents = list(agent_contributions.keys()) + self.updated_at = datetime.utcnow() + + class Config: + json_schema_extra = { + "example": { + "discussion_id": "disc-001", + "room_id": "product-design-room", + "objective": "设计用户登录系统", + "consensus_reached": True, + "confidence": 0.85, + "summary": "团队一致同意采用OAuth2.0 + 手机验证码的混合认证方案...", + "action_items": [ + "设计OAuth2.0集成方案", + "开发短信验证服务", + "编写安全测试用例" + ], + "unresolved_issues": [ + "第三方登录的优先级排序" + ], + "total_rounds": 15, + "total_messages": 45, + "status": "completed" + } + } diff --git a/backend/models/message.py b/backend/models/message.py new file mode 100644 index 0000000..9125b1e --- /dev/null +++ b/backend/models/message.py @@ -0,0 
class MessageType(str, Enum):
    """Kinds of chat message payloads."""

    TEXT = "text"        # plain text
    IMAGE = "image"      # image payload
    FILE = "file"        # file payload
    SYSTEM = "system"    # system-generated notice
    ACTION = "action"    # action message (e.g. a tool invocation)


class MessageAttachment:
    """Attachment metadata carried alongside a chat message.

    Plain value object; stored inside the Message document as a dict via
    to_dict()/from_dict().
    """

    attachment_type: str   # "image" or "file"
    url: str               # resource URL
    name: str              # file name
    size: int = 0          # file size in bytes
    mime_type: str = ""    # MIME type

    def __init__(
        self,
        attachment_type: str,
        url: str,
        name: str,
        size: int = 0,
        mime_type: str = ""
    ) -> None:
        self.attachment_type = attachment_type
        self.url = url
        self.name = name
        self.size = size
        self.mime_type = mime_type

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict (inverse of from_dict)."""
        field_names = ("attachment_type", "url", "name", "size", "mime_type")
        return {name: getattr(self, name) for name in field_names}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "MessageAttachment":
        """Build from a dict, tolerating missing keys (empty/zero defaults)."""
        return cls(
            attachment_type=data.get("attachment_type", ""),
            url=data.get("url", ""),
            name=data.get("name", ""),
            size=data.get("size", 0),
            mime_type=data.get("mime_type", "")
        )
Field(default_factory=list, description="工具调用记录") + tool_results: List[Dict[str, Any]] = Field(default_factory=list, description="工具调用结果") + + # 时间戳 + created_at: datetime = Field(default_factory=datetime.utcnow) + + class Settings: + name = "messages" + indexes = [ + [("room_id", 1), ("created_at", 1)], + [("discussion_id", 1)], + [("agent_id", 1)], + ] + + def get_attachments(self) -> List[MessageAttachment]: + """获取附件对象列表""" + return [MessageAttachment.from_dict(a) for a in self.attachments] + + def is_from_agent(self, agent_id: str) -> bool: + """检查消息是否来自指定Agent""" + return self.agent_id == agent_id + + def is_system_message(self) -> bool: + """检查是否为系统消息""" + return self.message_type == MessageType.SYSTEM.value + + class Config: + json_schema_extra = { + "example": { + "message_id": "msg-001", + "room_id": "product-design-room", + "discussion_id": "disc-001", + "agent_id": "product-manager", + "content": "我认为登录系统应该支持多种认证方式...", + "message_type": "text", + "round": 1, + "token_count": 150 + } + } diff --git a/backend/requirements.txt b/backend/requirements.txt new file mode 100644 index 0000000..807c1c3 --- /dev/null +++ b/backend/requirements.txt @@ -0,0 +1,42 @@ +# FastAPI and server +fastapi==0.109.0 +uvicorn[standard]==0.27.0 +python-multipart==0.0.6 +websockets==12.0 + +# MongoDB +motor==3.3.2 +pymongo==4.6.1 +beanie==1.25.0 + +# HTTP client +httpx==0.26.0 +aiohttp==3.9.1 + +# AI SDK clients +openai==1.12.0 +google-generativeai==0.3.2 +zhipuai==2.0.1 + +# Data validation +pydantic==2.6.0 +pydantic-settings==2.1.0 + +# Security +cryptography==42.0.2 +python-jose[cryptography]==3.3.0 +passlib[bcrypt]==1.7.4 + +# Utilities +python-dotenv==1.0.1 +tenacity==8.2.3 +numpy==1.26.4 + +# For embeddings and vector search +sentence-transformers==2.3.1 + +# Rate limiting +slowapi==0.1.9 + +# Logging +loguru==0.7.2 diff --git a/backend/routers/__init__.py b/backend/routers/__init__.py new file mode 100644 index 0000000..6c6e4e1 --- /dev/null +++ 
# ============ Request / response models ============

class CapabilitiesModel(BaseModel):
    """Capability configuration payload (mirrors Agent.capabilities)."""
    memory_enabled: bool = False
    mcp_tools: List[str] = []
    skills: List[str] = []
    multimodal: bool = False


class BehaviorModel(BaseModel):
    """Behavior configuration payload (mirrors Agent.behavior)."""
    speak_threshold: float = 0.5
    max_speak_per_round: int = 2
    speak_style: str = "balanced"


class AgentCreateRequest(BaseModel):
    """Payload for creating a new agent."""
    name: str = Field(..., description="Agent名称")
    role: str = Field(..., description="角色定义")
    system_prompt: str = Field(..., description="系统提示词")
    provider_id: str = Field(..., description="使用的AI接口ID")
    temperature: float = Field(default=0.7, ge=0, le=2, description="温度参数")
    max_tokens: int = Field(default=2000, gt=0, description="最大token数")
    capabilities: Optional[CapabilitiesModel] = None
    behavior: Optional[BehaviorModel] = None
    avatar: Optional[str] = None
    color: str = "#1890ff"

    class Config:
        json_schema_extra = {
            "example": {
                "name": "产品经理",
                "role": "产品规划和需求分析专家",
                "system_prompt": "你是一位经验丰富的产品经理...",
                "provider_id": "openrouter-abc123",
                "temperature": 0.7,
                "max_tokens": 2000
            }
        }


class AgentUpdateRequest(BaseModel):
    """Partial-update payload; only fields explicitly set are applied."""
    name: Optional[str] = None
    role: Optional[str] = None
    system_prompt: Optional[str] = None
    provider_id: Optional[str] = None
    temperature: Optional[float] = Field(default=None, ge=0, le=2)
    max_tokens: Optional[int] = Field(default=None, gt=0)
    capabilities: Optional[CapabilitiesModel] = None
    behavior: Optional[BehaviorModel] = None
    avatar: Optional[str] = None
    color: Optional[str] = None
    enabled: Optional[bool] = None


class AgentResponse(BaseModel):
    """Public representation of an agent returned by every endpoint."""
    agent_id: str
    name: str
    role: str
    system_prompt: str
    provider_id: str
    temperature: float
    max_tokens: int
    capabilities: Dict[str, Any]
    behavior: Dict[str, Any]
    avatar: Optional[str]
    color: str
    enabled: bool
    created_at: str
    updated_at: str


class AgentTestRequest(BaseModel):
    """Payload for test-chatting with an agent; has a default greeting."""
    message: str = "你好,请简单介绍一下你自己。"


class AgentTestResponse(BaseModel):
    """Result of a test chat (success flag plus response/latency details)."""
    success: bool
    message: str
    response: Optional[str] = None
    model: Optional[str] = None
    tokens: Optional[int] = None
    latency_ms: Optional[float] = None


class TemplateResponse(BaseModel):
    """One built-in agent preset template."""
    template_id: str
    name: str
    role: str
    system_prompt: str
    color: str


class GeneratePromptRequest(BaseModel):
    """Payload for AI-assisted system-prompt generation."""
    provider_id: str = Field(..., description="使用的AI接口ID")
    name: str = Field(..., description="Agent名称")
    role: str = Field(..., description="角色定位")
    description: Optional[str] = Field(None, description="额外描述(可选)")


class GeneratePromptResponse(BaseModel):
    """Result of prompt generation (success flag plus generated prompt)."""
    success: bool
    message: Optional[str] = None
    prompt: Optional[str] = None
    model: Optional[str] = None
    tokens: Optional[int] = None
@router.get("", response_model=List[AgentResponse])
async def list_agents(enabled_only: bool = False):
    """List all agents; pass enabled_only=true to filter out disabled ones."""
    agents = await AgentService.get_all_agents(enabled_only)
    return [_to_response(a) for a in agents]


@router.get("/templates", response_model=List[TemplateResponse])
async def list_templates():
    """Return the built-in agent preset templates."""
    return [
        TemplateResponse(
            template_id=tid,
            name=t["name"],
            role=t["role"],
            system_prompt=t["system_prompt"],
            color=t["color"]
        )
        for tid, t in AGENT_TEMPLATES.items()
    ]


@router.post("/generate-prompt", response_model=GeneratePromptResponse)
async def generate_prompt(request: GeneratePromptRequest):
    """Generate an agent system prompt using the configured AI provider."""
    result = await AgentService.generate_system_prompt(
        provider_id=request.provider_id,
        name=request.name,
        role=request.role,
        description=request.description
    )
    return GeneratePromptResponse(**result)


@router.get("/{agent_id}", response_model=AgentResponse)
async def get_agent(agent_id: str):
    """Fetch a single agent by id; 404 if it does not exist."""
    agent = await AgentService.get_agent(agent_id)
    if not agent:
        raise HTTPException(status_code=404, detail="Agent不存在")
    return _to_response(agent)


@router.put("/{agent_id}", response_model=AgentResponse)
async def update_agent(agent_id: str, request: AgentUpdateRequest):
    """Apply a partial update to an agent's configuration.

    Fixes: uses pydantic v2's ``model_dump`` (``.dict()`` is deprecated under
    the pinned pydantic 2.x) and drops the dead ``hasattr(..., "dict")``
    re-conversion — after ``model_dump`` nested models are already plain
    dicts, so that branch could never fire.
    """
    # exclude_unset keeps this a true partial update: only fields the
    # client actually sent are forwarded to the service layer.
    update_data = request.model_dump(exclude_unset=True)

    try:
        agent = await AgentService.update_agent(agent_id, **update_data)
        if not agent:
            raise HTTPException(status_code=404, detail="Agent不存在")
        return _to_response(agent)

    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))


@router.delete("/{agent_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_agent(agent_id: str):
    """Delete an agent; 404 if it does not exist."""
    success = await AgentService.delete_agent(agent_id)
    if not success:
        raise HTTPException(status_code=404, detail="Agent不存在")


@router.post("/{agent_id}/test", response_model=AgentTestResponse)
async def test_agent(agent_id: str, request: Optional[AgentTestRequest] = None):
    """Send a test message to the agent; falls back to a default greeting."""
    message = request.message if request else "你好,请简单介绍一下你自己。"
    result = await AgentService.test_agent(agent_id, message)
    return AgentTestResponse(**result)


@router.post("/{agent_id}/duplicate", response_model=AgentResponse)
async def duplicate_agent(agent_id: str, new_name: Optional[str] = None):
    """Clone an existing agent, optionally under a new name; 404 if absent."""
    agent = await AgentService.duplicate_agent(agent_id, new_name)
    if not agent:
        raise HTTPException(status_code=404, detail="源Agent不存在")
    return _to_response(agent)
raise HTTPException(status_code=400, detail=str(e)) + + +# ============ 辅助函数 ============ + +def _to_response(agent) -> AgentResponse: + """ + 转换为响应模型 + """ + return AgentResponse( + agent_id=agent.agent_id, + name=agent.name, + role=agent.role, + system_prompt=agent.system_prompt, + provider_id=agent.provider_id, + temperature=agent.temperature, + max_tokens=agent.max_tokens, + capabilities=agent.capabilities, + behavior=agent.behavior, + avatar=agent.avatar, + color=agent.color, + enabled=agent.enabled, + created_at=agent.created_at.isoformat(), + updated_at=agent.updated_at.isoformat() + ) diff --git a/backend/routers/chatrooms.py b/backend/routers/chatrooms.py new file mode 100644 index 0000000..1755b32 --- /dev/null +++ b/backend/routers/chatrooms.py @@ -0,0 +1,387 @@ +""" +聊天室管理路由 +""" +from typing import List, Optional, Dict, Any +from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect, status +from pydantic import BaseModel, Field +from loguru import logger + +from services.chatroom_service import ChatRoomService +from services.discussion_engine import DiscussionEngine +from services.message_router import MessageRouter + + +router = APIRouter() + + +# ============ 请求/响应模型 ============ + +class ChatRoomConfigModel(BaseModel): + """聊天室配置模型""" + max_rounds: int = 50 + message_history_size: int = 20 + consensus_threshold: float = 0.8 + round_interval: float = 1.0 + allow_user_interrupt: bool = True + + +class ChatRoomCreateRequest(BaseModel): + """创建聊天室请求""" + name: str = Field(..., description="聊天室名称") + description: str = Field(default="", description="描述") + agents: List[str] = Field(default=[], description="Agent ID列表") + moderator_agent_id: Optional[str] = Field(default=None, description="主持人Agent ID") + config: Optional[ChatRoomConfigModel] = None + + class Config: + json_schema_extra = { + "example": { + "name": "产品设计讨论室", + "description": "用于讨论新产品功能设计", + "agents": ["agent-abc123", "agent-def456"], + "moderator_agent_id": 
"agent-xyz789" + } + } + + +class ChatRoomUpdateRequest(BaseModel): + """更新聊天室请求""" + name: Optional[str] = None + description: Optional[str] = None + agents: Optional[List[str]] = None + moderator_agent_id: Optional[str] = None + config: Optional[ChatRoomConfigModel] = None + + +class ChatRoomResponse(BaseModel): + """聊天室响应""" + room_id: str + name: str + description: str + objective: str + agents: List[str] + moderator_agent_id: Optional[str] + config: Dict[str, Any] + status: str + current_round: int + current_discussion_id: Optional[str] + created_at: str + updated_at: str + completed_at: Optional[str] + + +class MessageResponse(BaseModel): + """消息响应""" + message_id: str + room_id: str + discussion_id: str + agent_id: Optional[str] + content: str + message_type: str + round: int + created_at: str + + +class StartDiscussionRequest(BaseModel): + """启动讨论请求""" + objective: str = Field(..., description="讨论目标") + + +class DiscussionStatusResponse(BaseModel): + """讨论状态响应""" + is_active: bool + room_id: str + discussion_id: Optional[str] = None + current_round: int = 0 + status: str + + +# ============ 路由处理 ============ + +@router.post("", response_model=ChatRoomResponse, status_code=status.HTTP_201_CREATED) +async def create_chatroom(request: ChatRoomCreateRequest): + """ + 创建新的聊天室 + """ + try: + chatroom = await ChatRoomService.create_chatroom( + name=request.name, + description=request.description, + agents=request.agents, + moderator_agent_id=request.moderator_agent_id, + config=request.config.dict() if request.config else None + ) + + return _to_response(chatroom) + + except ValueError as e: + raise HTTPException(status_code=400, detail=str(e)) + except Exception as e: + logger.error(f"创建聊天室失败: {e}") + raise HTTPException(status_code=500, detail="创建失败") + + +@router.get("", response_model=List[ChatRoomResponse]) +async def list_chatrooms(): + """ + 获取所有聊天室 + """ + chatrooms = await ChatRoomService.get_all_chatrooms() + return [_to_response(c) for c in chatrooms] + 
@router.get("/{room_id}", response_model=ChatRoomResponse)
async def get_chatroom(room_id: str):
    """Fetch a single chat room by id."""
    chatroom = await ChatRoomService.get_chatroom(room_id)
    if not chatroom:
        raise HTTPException(status_code=404, detail="聊天室不存在")
    return _to_response(chatroom)


@router.put("/{room_id}", response_model=ChatRoomResponse)
async def update_chatroom(room_id: str, request: ChatRoomUpdateRequest):
    """Partially update a chat room's configuration."""
    update_data = request.dict(exclude_unset=True)

    # .dict() already converts a nested ChatRoomConfigModel to a plain dict,
    # so the original hasattr(..., "dict") branch was dead code; keep a working
    # defensive conversion instead.
    config = update_data.get("config")
    if isinstance(config, BaseModel):
        update_data["config"] = config.dict()

    try:
        chatroom = await ChatRoomService.update_chatroom(room_id, **update_data)
        if not chatroom:
            raise HTTPException(status_code=404, detail="聊天室不存在")
        return _to_response(chatroom)

    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))


@router.delete("/{room_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_chatroom(room_id: str):
    """Delete a chat room."""
    success = await ChatRoomService.delete_chatroom(room_id)
    if not success:
        raise HTTPException(status_code=404, detail="聊天室不存在")


@router.post("/{room_id}/agents/{agent_id}", response_model=ChatRoomResponse)
async def add_agent_to_chatroom(room_id: str, agent_id: str):
    """Add an Agent to a chat room."""
    try:
        chatroom = await ChatRoomService.add_agent(room_id, agent_id)
        if not chatroom:
            raise HTTPException(status_code=404, detail="聊天室不存在")
        return _to_response(chatroom)
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))


@router.delete("/{room_id}/agents/{agent_id}", response_model=ChatRoomResponse)
async def remove_agent_from_chatroom(room_id: str, agent_id: str):
    """Remove an Agent from a chat room."""
    chatroom = await ChatRoomService.remove_agent(room_id, agent_id)
    if not chatroom:
        raise HTTPException(status_code=404, detail="聊天室不存在")
    return _to_response(chatroom)

@router.get("/{room_id}/messages", response_model=List[MessageResponse])
async def get_chatroom_messages(
    room_id: str,
    limit: int = 50,
    skip: int = 0,
    discussion_id: Optional[str] = None
):
    """Fetch a room's message history, optionally scoped to one discussion."""
    messages = await ChatRoomService.get_messages(
        room_id, limit, skip, discussion_id
    )
    return [_message_to_response(m) for m in messages]


# Strong references to fire-and-forget discussion tasks: asyncio.create_task
# keeps only a weak reference, so an otherwise-unreferenced task may be
# garbage-collected before it finishes (see asyncio docs).
_background_tasks: set = set()


@router.post("/{room_id}/start", response_model=DiscussionStatusResponse)
async def start_discussion(room_id: str, request: StartDiscussionRequest):
    """Start a discussion in the background and return its initial status."""
    try:
        # Local import preserved from the original; could live at module top.
        import asyncio

        task = asyncio.create_task(
            DiscussionEngine.start_discussion(room_id, request.objective)
        )
        _background_tasks.add(task)
        task.add_done_callback(_background_tasks.discard)

        # Give the engine a moment to initialize before reporting status.
        await asyncio.sleep(0.5)

        chatroom = await ChatRoomService.get_chatroom(room_id)

        return DiscussionStatusResponse(
            is_active=True,
            room_id=room_id,
            discussion_id=chatroom.current_discussion_id if chatroom else None,
            current_round=chatroom.current_round if chatroom else 0,
            status=chatroom.status if chatroom else "unknown"
        )

    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))


@router.post("/{room_id}/pause", response_model=DiscussionStatusResponse)
async def pause_discussion(room_id: str):
    """Pause an active discussion."""
    success = await DiscussionEngine.pause_discussion(room_id)
    if not success:
        raise HTTPException(status_code=400, detail="没有进行中的讨论")

    chatroom = await ChatRoomService.get_chatroom(room_id)
    return DiscussionStatusResponse(
        is_active=False,
        room_id=room_id,
        discussion_id=chatroom.current_discussion_id if chatroom else None,
        current_round=chatroom.current_round if chatroom else 0,
        status="paused"
    )


@router.post("/{room_id}/resume", response_model=DiscussionStatusResponse)
async def resume_discussion(room_id: str):
    """Resume a paused discussion."""
    success = await DiscussionEngine.resume_discussion(room_id)
    if not success:
        raise HTTPException(status_code=400, detail="聊天室不在暂停状态")

    chatroom = await ChatRoomService.get_chatroom(room_id)
    return DiscussionStatusResponse(
        is_active=True,
        room_id=room_id,
        discussion_id=chatroom.current_discussion_id if chatroom else None,
        current_round=chatroom.current_round if chatroom else 0,
        status="active"
    )


@router.post("/{room_id}/stop", response_model=DiscussionStatusResponse)
async def stop_discussion(room_id: str):
    """Request a running discussion to stop."""
    success = await DiscussionEngine.stop_discussion(room_id)

    chatroom = await ChatRoomService.get_chatroom(room_id)
    return DiscussionStatusResponse(
        is_active=False,
        room_id=room_id,
        discussion_id=chatroom.current_discussion_id if chatroom else None,
        current_round=chatroom.current_round if chatroom else 0,
        status="stopping" if success else chatroom.status if chatroom else "unknown"
    )


@router.get("/{room_id}/status", response_model=DiscussionStatusResponse)
async def get_discussion_status(room_id: str):
    """Report the current discussion status of a room."""
    chatroom = await ChatRoomService.get_chatroom(room_id)
    if not chatroom:
        raise HTTPException(status_code=404, detail="聊天室不存在")

    is_active = DiscussionEngine.is_discussion_active(room_id)

    return DiscussionStatusResponse(
        is_active=is_active,
        room_id=room_id,
        discussion_id=chatroom.current_discussion_id,
        current_round=chatroom.current_round,
        status=chatroom.status
    )


# ============ WebSocket endpoint ============

@router.websocket("/ws/{room_id}")
async def chatroom_websocket(websocket: WebSocket, room_id: str):
    """Per-room WebSocket used for real-time message push."""
    # Reject connections to unknown rooms before registering the socket.
    chatroom = await ChatRoomService.get_chatroom(room_id)
    if not chatroom:
        await websocket.close(code=4004, reason="聊天室不存在")
        return

    await MessageRouter.connect(room_id, websocket)

    try:
        while True:
            # Keep the connection alive; clients may send heartbeats.
            data = await websocket.receive_text()

            if data == "ping":
                await websocket.send_text("pong")

    except WebSocketDisconnect:
        await MessageRouter.disconnect(room_id, websocket)
    except Exception as e:
        logger.error(f"WebSocket错误: {e}")
        await MessageRouter.disconnect(room_id, websocket)


# ============ helpers ============

def _to_response(chatroom) -> ChatRoomResponse:
    """Serialize a chat-room document into the API response model."""
    return ChatRoomResponse(
        room_id=chatroom.room_id,
        name=chatroom.name,
        description=chatroom.description,
        objective=chatroom.objective,
        agents=chatroom.agents,
        moderator_agent_id=chatroom.moderator_agent_id,
        config=chatroom.config,
        status=chatroom.status,
        current_round=chatroom.current_round,
        current_discussion_id=chatroom.current_discussion_id,
        created_at=chatroom.created_at.isoformat(),
        updated_at=chatroom.updated_at.isoformat(),
        completed_at=chatroom.completed_at.isoformat() if chatroom.completed_at else None
    )


def _message_to_response(message) -> MessageResponse:
    """Serialize a message document into the API response model."""
    return MessageResponse(
        message_id=message.message_id,
        room_id=message.room_id,
        discussion_id=message.discussion_id,
        agent_id=message.agent_id,
        content=message.content,
        message_type=message.message_type,
        round=message.round,
        created_at=message.created_at.isoformat()
    )


# =====================================================================
# backend/routers/discussions.py
# =====================================================================
"""
Discussion-result routes.
"""
from typing import List, Optional, Dict, Any
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel

from models.discussion_result import DiscussionResult


router = APIRouter()


# ============ response models ============

class DiscussionResponse(BaseModel):
    """Serialized discussion result (remaining fields continue past this chunk)."""
    discussion_id: str
    room_id: str
    objective: str
    consensus_reached: bool
    confidence: float
    summary: str
    action_items: List[str]
    unresolved_issues: List[str]
    key_decisions: List[str]
    total_rounds: int
    total_messages: int
    participating_agents: List[str]
    agent_contributions: Dict[str, int]
    status: str
    end_reason: str
    created_at: str
completed_at: Optional[str] + + +class DiscussionListResponse(BaseModel): + """讨论列表响应""" + discussions: List[DiscussionResponse] + total: int + + +# ============ 路由处理 ============ + +@router.get("", response_model=DiscussionListResponse) +async def list_discussions( + room_id: Optional[str] = None, + limit: int = 20, + skip: int = 0 +): + """ + 获取讨论结果列表 + """ + query = {} + if room_id: + query["room_id"] = room_id + + discussions = await DiscussionResult.find(query).sort( + "-created_at" + ).skip(skip).limit(limit).to_list() + + total = await DiscussionResult.find(query).count() + + return DiscussionListResponse( + discussions=[_to_response(d) for d in discussions], + total=total + ) + + +@router.get("/{discussion_id}", response_model=DiscussionResponse) +async def get_discussion(discussion_id: str): + """ + 获取指定讨论结果 + """ + discussion = await DiscussionResult.find_one( + DiscussionResult.discussion_id == discussion_id + ) + + if not discussion: + raise HTTPException(status_code=404, detail="讨论记录不存在") + + return _to_response(discussion) + + +@router.get("/room/{room_id}", response_model=List[DiscussionResponse]) +async def get_room_discussions(room_id: str, limit: int = 10): + """ + 获取聊天室的讨论历史 + """ + discussions = await DiscussionResult.find( + {"room_id": room_id} + ).sort("-created_at").limit(limit).to_list() + + return [_to_response(d) for d in discussions] + + +@router.get("/room/{room_id}/latest", response_model=DiscussionResponse) +async def get_latest_discussion(room_id: str): + """ + 获取聊天室最新的讨论结果 + """ + discussion = await DiscussionResult.find( + {"room_id": room_id} + ).sort("-created_at").first_or_none() + + if not discussion: + raise HTTPException(status_code=404, detail="没有找到讨论记录") + + return _to_response(discussion) + + +# ============ 辅助函数 ============ + +def _to_response(discussion: DiscussionResult) -> DiscussionResponse: + """ + 转换为响应模型 + """ + return DiscussionResponse( + discussion_id=discussion.discussion_id, + room_id=discussion.room_id, + 
objective=discussion.objective, + consensus_reached=discussion.consensus_reached, + confidence=discussion.confidence, + summary=discussion.summary, + action_items=discussion.action_items, + unresolved_issues=discussion.unresolved_issues, + key_decisions=discussion.key_decisions, + total_rounds=discussion.total_rounds, + total_messages=discussion.total_messages, + participating_agents=discussion.participating_agents, + agent_contributions=discussion.agent_contributions, + status=discussion.status, + end_reason=discussion.end_reason, + created_at=discussion.created_at.isoformat(), + completed_at=discussion.completed_at.isoformat() if discussion.completed_at else None + ) diff --git a/backend/routers/providers.py b/backend/routers/providers.py new file mode 100644 index 0000000..806d90a --- /dev/null +++ b/backend/routers/providers.py @@ -0,0 +1,241 @@ +""" +AI接口管理路由 +""" +from typing import List, Optional, Dict, Any +from fastapi import APIRouter, HTTPException, status +from pydantic import BaseModel, Field +from loguru import logger + +from services.ai_provider_service import AIProviderService +from utils.encryption import mask_api_key + + +router = APIRouter() + + +# ============ 请求/响应模型 ============ + +class ProxyConfigModel(BaseModel): + """代理配置模型""" + http_proxy: Optional[str] = None + https_proxy: Optional[str] = None + no_proxy: List[str] = [] + + +class RateLimitModel(BaseModel): + """速率限制模型""" + requests_per_minute: int = 60 + tokens_per_minute: int = 100000 + + +class ProviderCreateRequest(BaseModel): + """创建AI接口请求""" + provider_type: str = Field(..., description="提供商类型: minimax, zhipu, openrouter, kimi, deepseek, gemini, ollama, llmstudio") + name: str = Field(..., description="自定义名称") + model: str = Field(..., description="模型名称") + api_key: str = Field(default="", description="API密钥") + base_url: str = Field(default="", description="API基础URL") + use_proxy: bool = Field(default=False, description="是否使用代理") + proxy_config: Optional[ProxyConfigModel] = None 
+ rate_limit: Optional[RateLimitModel] = None + timeout: int = Field(default=60, description="超时时间(秒)") + extra_params: Dict[str, Any] = Field(default_factory=dict, description="额外参数") + + class Config: + json_schema_extra = { + "example": { + "provider_type": "openrouter", + "name": "OpenRouter GPT-4", + "model": "openai/gpt-4-turbo", + "api_key": "sk-xxx", + "use_proxy": True, + "proxy_config": { + "http_proxy": "http://127.0.0.1:7890", + "https_proxy": "http://127.0.0.1:7890" + } + } + } + + +class ProviderUpdateRequest(BaseModel): + """更新AI接口请求""" + name: Optional[str] = None + model: Optional[str] = None + api_key: Optional[str] = None + base_url: Optional[str] = None + use_proxy: Optional[bool] = None + proxy_config: Optional[ProxyConfigModel] = None + rate_limit: Optional[RateLimitModel] = None + timeout: Optional[int] = None + extra_params: Optional[Dict[str, Any]] = None + enabled: Optional[bool] = None + + +class ProviderResponse(BaseModel): + """AI接口响应""" + provider_id: str + provider_type: str + name: str + api_key_masked: str + base_url: str + model: str + use_proxy: bool + proxy_config: Dict[str, Any] + rate_limit: Dict[str, int] + timeout: int + extra_params: Dict[str, Any] + enabled: bool + created_at: str + updated_at: str + + +class TestConfigRequest(BaseModel): + """测试配置请求""" + provider_type: str + api_key: str + base_url: str = "" + model: str = "" + use_proxy: bool = False + proxy_config: Optional[ProxyConfigModel] = None + timeout: int = 30 + + +class TestResponse(BaseModel): + """测试响应""" + success: bool + message: str + model: Optional[str] = None + latency_ms: Optional[float] = None + + +# ============ 路由处理 ============ + +@router.post("", response_model=ProviderResponse, status_code=status.HTTP_201_CREATED) +async def create_provider(request: ProviderCreateRequest): + """ + 创建新的AI接口配置 + """ + try: + provider = await AIProviderService.create_provider( + provider_type=request.provider_type, + name=request.name, + model=request.model, + 
api_key=request.api_key, + base_url=request.base_url, + use_proxy=request.use_proxy, + proxy_config=request.proxy_config.dict() if request.proxy_config else None, + rate_limit=request.rate_limit.dict() if request.rate_limit else None, + timeout=request.timeout, + extra_params=request.extra_params + ) + + return _to_response(provider) + + except ValueError as e: + raise HTTPException(status_code=400, detail=str(e)) + except Exception as e: + logger.error(f"创建AI接口失败: {e}") + raise HTTPException(status_code=500, detail="创建失败") + + +@router.get("", response_model=List[ProviderResponse]) +async def list_providers(enabled_only: bool = False): + """ + 获取所有AI接口配置 + """ + providers = await AIProviderService.get_all_providers(enabled_only) + return [_to_response(p) for p in providers] + + +@router.get("/{provider_id}", response_model=ProviderResponse) +async def get_provider(provider_id: str): + """ + 获取指定AI接口配置 + """ + provider = await AIProviderService.get_provider(provider_id) + if not provider: + raise HTTPException(status_code=404, detail="AI接口不存在") + return _to_response(provider) + + +@router.put("/{provider_id}", response_model=ProviderResponse) +async def update_provider(provider_id: str, request: ProviderUpdateRequest): + """ + 更新AI接口配置 + """ + update_data = request.dict(exclude_unset=True) + + # 转换嵌套模型 + if "proxy_config" in update_data and update_data["proxy_config"]: + update_data["proxy_config"] = update_data["proxy_config"].dict() if hasattr(update_data["proxy_config"], "dict") else update_data["proxy_config"] + if "rate_limit" in update_data and update_data["rate_limit"]: + update_data["rate_limit"] = update_data["rate_limit"].dict() if hasattr(update_data["rate_limit"], "dict") else update_data["rate_limit"] + + try: + provider = await AIProviderService.update_provider(provider_id, **update_data) + if not provider: + raise HTTPException(status_code=404, detail="AI接口不存在") + return _to_response(provider) + + except ValueError as e: + raise 
HTTPException(status_code=400, detail=str(e)) + + +@router.delete("/{provider_id}", status_code=status.HTTP_204_NO_CONTENT) +async def delete_provider(provider_id: str): + """ + 删除AI接口配置 + """ + success = await AIProviderService.delete_provider(provider_id) + if not success: + raise HTTPException(status_code=404, detail="AI接口不存在") + + +@router.post("/{provider_id}/test", response_model=TestResponse) +async def test_provider(provider_id: str): + """ + 测试AI接口连接 + """ + result = await AIProviderService.test_provider(provider_id) + return TestResponse(**result) + + +@router.post("/test", response_model=TestResponse) +async def test_provider_config(request: TestConfigRequest): + """ + 测试AI接口配置(不保存) + """ + result = await AIProviderService.test_provider_config( + provider_type=request.provider_type, + api_key=request.api_key, + base_url=request.base_url, + model=request.model, + use_proxy=request.use_proxy, + proxy_config=request.proxy_config.dict() if request.proxy_config else None, + timeout=request.timeout + ) + return TestResponse(**result) + + +# ============ 辅助函数 ============ + +def _to_response(provider) -> ProviderResponse: + """ + 转换为响应模型 + """ + return ProviderResponse( + provider_id=provider.provider_id, + provider_type=provider.provider_type, + name=provider.name, + api_key_masked=mask_api_key(provider.api_key) if provider.api_key else "", + base_url=provider.base_url, + model=provider.model, + use_proxy=provider.use_proxy, + proxy_config=provider.proxy_config, + rate_limit=provider.rate_limit, + timeout=provider.timeout, + extra_params=provider.extra_params, + enabled=provider.enabled, + created_at=provider.created_at.isoformat(), + updated_at=provider.updated_at.isoformat() + ) diff --git a/backend/services/__init__.py b/backend/services/__init__.py new file mode 100644 index 0000000..14bb6e9 --- /dev/null +++ b/backend/services/__init__.py @@ -0,0 +1,22 @@ +""" +业务服务模块 +""" +from .ai_provider_service import AIProviderService +from .agent_service import 
AgentService +from .chatroom_service import ChatRoomService +from .message_router import MessageRouter +from .discussion_engine import DiscussionEngine +from .consensus_manager import ConsensusManager +from .mcp_service import MCPService +from .memory_service import MemoryService + +__all__ = [ + "AIProviderService", + "AgentService", + "ChatRoomService", + "MessageRouter", + "DiscussionEngine", + "ConsensusManager", + "MCPService", + "MemoryService", +] diff --git a/backend/services/agent_service.py b/backend/services/agent_service.py new file mode 100644 index 0000000..590b8f8 --- /dev/null +++ b/backend/services/agent_service.py @@ -0,0 +1,438 @@ +""" +Agent服务 +管理AI代理的配置 +""" +import uuid +from datetime import datetime +from typing import List, Dict, Any, Optional +from loguru import logger + +from models.agent import Agent +from services.ai_provider_service import AIProviderService + + +class AgentService: + """ + Agent服务类 + 负责Agent的CRUD操作 + """ + + @classmethod + async def create_agent( + cls, + name: str, + role: str, + system_prompt: str, + provider_id: str, + temperature: float = 0.7, + max_tokens: int = 2000, + capabilities: Optional[Dict[str, Any]] = None, + behavior: Optional[Dict[str, Any]] = None, + avatar: Optional[str] = None, + color: str = "#1890ff" + ) -> Agent: + """ + 创建新的Agent + + Args: + name: Agent名称 + role: 角色定义 + system_prompt: 系统提示词 + provider_id: 使用的AI接口ID + temperature: 温度参数 + max_tokens: 最大token数 + capabilities: 能力配置 + behavior: 行为配置 + avatar: 头像URL + color: 代表颜色 + + Returns: + 创建的Agent文档 + """ + # 验证AI接口存在 + provider = await AIProviderService.get_provider(provider_id) + if not provider: + raise ValueError(f"AI接口不存在: {provider_id}") + + # 生成唯一ID + agent_id = f"agent-{uuid.uuid4().hex[:8]}" + + # 默认能力配置 + default_capabilities = { + "memory_enabled": False, + "mcp_tools": [], + "skills": [], + "multimodal": False + } + if capabilities: + default_capabilities.update(capabilities) + + # 默认行为配置 + default_behavior = { + "speak_threshold": 
0.5, + "max_speak_per_round": 2, + "speak_style": "balanced" + } + if behavior: + default_behavior.update(behavior) + + # 创建文档 + agent = Agent( + agent_id=agent_id, + name=name, + role=role, + system_prompt=system_prompt, + provider_id=provider_id, + temperature=temperature, + max_tokens=max_tokens, + capabilities=default_capabilities, + behavior=default_behavior, + avatar=avatar, + color=color, + enabled=True, + created_at=datetime.utcnow(), + updated_at=datetime.utcnow() + ) + + await agent.insert() + + logger.info(f"创建Agent: {agent_id} ({name})") + return agent + + @classmethod + async def get_agent(cls, agent_id: str) -> Optional[Agent]: + """ + 获取指定Agent + + Args: + agent_id: Agent ID + + Returns: + Agent文档或None + """ + return await Agent.find_one(Agent.agent_id == agent_id) + + @classmethod + async def get_all_agents( + cls, + enabled_only: bool = False + ) -> List[Agent]: + """ + 获取所有Agent + + Args: + enabled_only: 是否只返回启用的Agent + + Returns: + Agent列表 + """ + if enabled_only: + return await Agent.find(Agent.enabled == True).to_list() + return await Agent.find_all().to_list() + + @classmethod + async def get_agents_by_ids( + cls, + agent_ids: List[str] + ) -> List[Agent]: + """ + 根据ID列表获取多个Agent + + Args: + agent_ids: Agent ID列表 + + Returns: + Agent列表 + """ + return await Agent.find( + {"agent_id": {"$in": agent_ids}} + ).to_list() + + @classmethod + async def update_agent( + cls, + agent_id: str, + **kwargs + ) -> Optional[Agent]: + """ + 更新Agent配置 + + Args: + agent_id: Agent ID + **kwargs: 要更新的字段 + + Returns: + 更新后的Agent或None + """ + agent = await cls.get_agent(agent_id) + if not agent: + return None + + # 如果更新了provider_id,验证其存在 + if "provider_id" in kwargs: + provider = await AIProviderService.get_provider(kwargs["provider_id"]) + if not provider: + raise ValueError(f"AI接口不存在: {kwargs['provider_id']}") + + # 更新字段 + kwargs["updated_at"] = datetime.utcnow() + + for key, value in kwargs.items(): + if hasattr(agent, key): + setattr(agent, key, value) + + await 
agent.save() + + logger.info(f"更新Agent: {agent_id}") + return agent + + @classmethod + async def delete_agent(cls, agent_id: str) -> bool: + """ + 删除Agent + + Args: + agent_id: Agent ID + + Returns: + 是否删除成功 + """ + agent = await cls.get_agent(agent_id) + if not agent: + return False + + await agent.delete() + + logger.info(f"删除Agent: {agent_id}") + return True + + @classmethod + async def test_agent( + cls, + agent_id: str, + test_message: str = "你好,请简单介绍一下你自己。" + ) -> Dict[str, Any]: + """ + 测试Agent对话 + + Args: + agent_id: Agent ID + test_message: 测试消息 + + Returns: + 测试结果 + """ + agent = await cls.get_agent(agent_id) + if not agent: + return { + "success": False, + "message": f"Agent不存在: {agent_id}" + } + + if not agent.enabled: + return { + "success": False, + "message": "Agent已禁用" + } + + # 构建消息 + messages = [ + {"role": "system", "content": agent.system_prompt}, + {"role": "user", "content": test_message} + ] + + # 调用AI接口 + response = await AIProviderService.chat( + provider_id=agent.provider_id, + messages=messages, + temperature=agent.temperature, + max_tokens=agent.max_tokens + ) + + if response.success: + return { + "success": True, + "message": "测试成功", + "response": response.content, + "model": response.model, + "tokens": response.total_tokens, + "latency_ms": response.latency_ms + } + else: + return { + "success": False, + "message": response.error + } + + @classmethod + async def duplicate_agent( + cls, + agent_id: str, + new_name: Optional[str] = None + ) -> Optional[Agent]: + """ + 复制Agent + + Args: + agent_id: 源Agent ID + new_name: 新Agent名称 + + Returns: + 新创建的Agent或None + """ + source_agent = await cls.get_agent(agent_id) + if not source_agent: + return None + + return await cls.create_agent( + name=new_name or f"{source_agent.name} (副本)", + role=source_agent.role, + system_prompt=source_agent.system_prompt, + provider_id=source_agent.provider_id, + temperature=source_agent.temperature, + max_tokens=source_agent.max_tokens, + 
capabilities=source_agent.capabilities, + behavior=source_agent.behavior, + avatar=source_agent.avatar, + color=source_agent.color + ) + + @classmethod + async def generate_system_prompt( + cls, + provider_id: str, + name: str, + role: str, + description: Optional[str] = None + ) -> Dict[str, Any]: + """ + 使用AI生成Agent系统提示词 + + Args: + provider_id: AI接口ID + name: Agent名称 + role: 角色定位 + description: 额外描述(可选) + + Returns: + 生成结果,包含success和生成的prompt + """ + # 验证AI接口存在 + provider = await AIProviderService.get_provider(provider_id) + if not provider: + return { + "success": False, + "message": f"AI接口不存在: {provider_id}" + } + + # 构建生成提示词的请求 + generate_prompt = f"""请为一个AI Agent编写系统提示词(system prompt)。 + +Agent名称:{name} +角色定位:{role} +{f'补充说明:{description}' if description else ''} + +要求: +1. 提示词应简洁专业,控制在200字以内 +2. 明确该Agent的核心职责和专业领域 +3. 说明在多Agent讨论中应该关注什么 +4. 使用中文编写 +5. 不要包含任何问候语或开场白,直接给出提示词内容 + +请直接输出系统提示词,不要有任何额外的解释或包装。""" + + try: + messages = [{"role": "user", "content": generate_prompt}] + + response = await AIProviderService.chat( + provider_id=provider_id, + messages=messages, + temperature=0.7, + max_tokens=1000 + ) + + if response.success: + # 清理可能的包装文本 + content = response.content.strip() + # 移除可能的markdown代码块标记 + if content.startswith("```"): + lines = content.split("\n") + content = "\n".join(lines[1:]) + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + return { + "success": True, + "prompt": content, + "model": response.model, + "tokens": response.total_tokens + } + else: + return { + "success": False, + "message": response.error or "生成失败" + } + except Exception as e: + logger.error(f"生成系统提示词失败: {e}") + return { + "success": False, + "message": f"生成失败: {str(e)}" + } + + +# Agent预设模板 +AGENT_TEMPLATES = { + "product_manager": { + "name": "产品经理", + "role": "产品规划和需求分析专家", + "system_prompt": """你是一位经验丰富的产品经理,擅长: +- 分析用户需求和痛点 +- 制定产品策略和路线图 +- 平衡业务目标和用户体验 +- 与团队协作推进产品迭代 + +在讨论中,你需要从产品角度出发,关注用户价值、商业可行性和优先级排序。 +请用专业但易懂的语言表达观点。""", + 
"color": "#1890ff" + }, + "developer": { + "name": "开发工程师", + "role": "技术实现和架构设计专家", + "system_prompt": """你是一位资深的软件开发工程师,擅长: +- 系统架构设计 +- 代码实现和优化 +- 技术方案评估 +- 性能和安全考量 + +在讨论中,你需要从技术角度出发,关注实现可行性、技术债务和最佳实践。 +请提供具体的技术建议和潜在风险评估。""", + "color": "#52c41a" + }, + "designer": { + "name": "设计师", + "role": "用户体验和界面设计专家", + "system_prompt": """你是一位专业的UI/UX设计师,擅长: +- 用户体验设计 +- 界面视觉设计 +- 交互流程优化 +- 设计系统构建 + +在讨论中,你需要从设计角度出发,关注用户体验、视觉美感和交互流畅性。 +请提供设计建议并考虑可用性和一致性。""", + "color": "#eb2f96" + }, + "moderator": { + "name": "主持人", + "role": "讨论主持和共识判断专家", + "system_prompt": """你是讨论的主持人,负责: +- 引导讨论方向 +- 总结各方观点 +- 判断是否达成共识 +- 提炼行动要点 + +在讨论中,你需要保持中立,促进有效沟通,并在适当时机总结讨论成果。 +当各方观点趋于一致时,请明确指出并总结共识内容。""", + "color": "#722ed1" + } +} diff --git a/backend/services/ai_provider_service.py b/backend/services/ai_provider_service.py new file mode 100644 index 0000000..11a1603 --- /dev/null +++ b/backend/services/ai_provider_service.py @@ -0,0 +1,364 @@ +""" +AI接口提供商服务 +管理AI接口的配置和调用 +""" +import uuid +from datetime import datetime +from typing import List, Dict, Any, Optional +from loguru import logger + +from models.ai_provider import AIProvider +from adapters import get_adapter, BaseAdapter, ChatMessage, AdapterResponse +from utils.encryption import encrypt_api_key, decrypt_api_key +from utils.rate_limiter import rate_limiter + + +class AIProviderService: + """ + AI接口提供商服务类 + 负责AI接口的CRUD操作和调用 + """ + + # 缓存适配器实例 + _adapter_cache: Dict[str, BaseAdapter] = {} + + @classmethod + async def create_provider( + cls, + provider_type: str, + name: str, + model: str, + api_key: str = "", + base_url: str = "", + use_proxy: bool = False, + proxy_config: Optional[Dict[str, Any]] = None, + rate_limit: Optional[Dict[str, int]] = None, + timeout: int = 60, + extra_params: Optional[Dict[str, Any]] = None + ) -> AIProvider: + """ + 创建新的AI接口配置 + + Args: + provider_type: 提供商类型 + name: 自定义名称 + model: 模型名称 + api_key: API密钥 + base_url: API基础URL + use_proxy: 是否使用代理 + proxy_config: 代理配置 + rate_limit: 速率限制配置 + timeout: 超时时间 
class AIProviderService:
    """
    Service layer for AI provider configurations.

    Handles CRUD for provider records, encrypted API-key storage,
    adapter instantiation/caching, rate limiting and chat invocation.
    """

    # Cache of instantiated adapters, keyed by provider_id.
    _adapter_cache: Dict[str, BaseAdapter] = {}

    @classmethod
    async def create_provider(
        cls,
        provider_type: str,
        name: str,
        model: str,
        api_key: str = "",
        base_url: str = "",
        use_proxy: bool = False,
        proxy_config: Optional[Dict[str, Any]] = None,
        rate_limit: Optional[Dict[str, int]] = None,
        timeout: int = 60,
        extra_params: Optional[Dict[str, Any]] = None
    ) -> AIProvider:
        """Create and persist a new AI provider configuration.

        Args:
            provider_type: Adapter/provider type identifier.
            name: Human-readable display name.
            model: Model name used with this provider.
            api_key: API key; stored encrypted, never in clear text.
            base_url: Base URL of the API endpoint.
            use_proxy: Whether requests go through a proxy.
            proxy_config: Proxy settings, if any.
            rate_limit: Requests/tokens-per-minute limits.
            timeout: Request timeout in seconds.
            extra_params: Provider-specific extra parameters.

        Returns:
            The inserted AIProvider document.

        Raises:
            ValueError: If ``provider_type`` is not a known adapter type.
        """
        # Reject unknown provider types up front.
        try:
            get_adapter(provider_type)
        except ValueError:
            raise ValueError(f"不支持的提供商类型: {provider_type}")

        provider_id = f"{provider_type}-{uuid.uuid4().hex[:8]}"
        limits = rate_limit or {"requests_per_minute": 60, "tokens_per_minute": 100000}

        provider = AIProvider(
            provider_id=provider_id,
            provider_type=provider_type,
            name=name,
            # Never persist the key in clear text.
            api_key=encrypt_api_key(api_key) if api_key else "",
            base_url=base_url,
            model=model,
            use_proxy=use_proxy,
            proxy_config=proxy_config or {},
            rate_limit=limits,
            timeout=timeout,
            extra_params=extra_params or {},
            enabled=True,
            created_at=datetime.utcnow(),
            updated_at=datetime.utcnow()
        )
        await provider.insert()

        # Make the rate limiter aware of the new provider.
        rate_limiter.register(
            provider_id,
            provider.rate_limit.get("requests_per_minute", 60),
            provider.rate_limit.get("tokens_per_minute", 100000)
        )

        logger.info(f"创建AI接口配置: {provider_id} ({name})")
        return provider

    @classmethod
    async def get_provider(cls, provider_id: str) -> Optional[AIProvider]:
        """Return the provider document with the given id, or None."""
        return await AIProvider.find_one(AIProvider.provider_id == provider_id)

    @classmethod
    async def get_all_providers(
        cls,
        enabled_only: bool = False
    ) -> List[AIProvider]:
        """Return all providers, optionally restricted to enabled ones."""
        if enabled_only:
            # Beanie requires the explicit ``== True`` comparison here.
            return await AIProvider.find(AIProvider.enabled == True).to_list()  # noqa: E712
        return await AIProvider.find_all().to_list()

    @classmethod
    async def update_provider(
        cls,
        provider_id: str,
        **kwargs
    ) -> Optional[AIProvider]:
        """Apply the given field updates to a provider.

        A non-empty ``api_key`` is encrypted before storage; the cached
        adapter is invalidated and rate limits are re-registered when
        they change. Unknown attribute names are silently ignored.

        Returns:
            The updated AIProvider, or None when the id is unknown.
        """
        provider = await cls.get_provider(provider_id)
        if provider is None:
            return None

        updates = dict(kwargs)
        if updates.get("api_key"):
            # Encrypt replacement keys, same as on creation.
            updates["api_key"] = encrypt_api_key(updates["api_key"])
        updates["updated_at"] = datetime.utcnow()

        for field_name, field_value in updates.items():
            if hasattr(provider, field_name):
                setattr(provider, field_name, field_value)

        await provider.save()

        # Drop any stale adapter built from the old settings.
        cls._adapter_cache.pop(provider_id, None)

        if "rate_limit" in updates:
            rate_limiter.unregister(provider_id)
            rate_limiter.register(
                provider_id,
                provider.rate_limit.get("requests_per_minute", 60),
                provider.rate_limit.get("tokens_per_minute", 100000)
            )

        logger.info(f"更新AI接口配置: {provider_id}")
        return provider

    @classmethod
    async def delete_provider(cls, provider_id: str) -> bool:
        """Delete a provider plus its cached adapter and rate-limit state.

        Returns:
            True when a provider was found and removed, else False.
        """
        provider = await cls.get_provider(provider_id)
        if provider is None:
            return False

        await provider.delete()
        cls._adapter_cache.pop(provider_id, None)
        rate_limiter.unregister(provider_id)

        logger.info(f"删除AI接口配置: {provider_id}")
        return True

    @classmethod
    async def get_adapter(cls, provider_id: str) -> Optional[BaseAdapter]:
        """Return a (cached) adapter instance for the provider.

        Returns:
            The adapter, or None when the provider is missing or disabled.
        """
        cached = cls._adapter_cache.get(provider_id)
        if cached is not None:
            return cached

        provider = await cls.get_provider(provider_id)
        if provider is None or not provider.enabled:
            return None

        # NOTE: the bare name below resolves to the module-level adapter
        # factory import, not this classmethod.
        adapter_class = get_adapter(provider.provider_type)
        adapter = adapter_class(
            api_key=decrypt_api_key(provider.api_key) if provider.api_key else "",
            base_url=provider.base_url,
            model=provider.model,
            use_proxy=provider.use_proxy,
            proxy_config=provider.proxy_config,
            timeout=provider.timeout,
            **provider.extra_params
        )

        cls._adapter_cache[provider_id] = adapter
        return adapter

    @classmethod
    async def chat(
        cls,
        provider_id: str,
        messages: List[Dict[str, str]],
        temperature: float = 0.7,
        max_tokens: int = 2000,
        **kwargs
    ) -> AdapterResponse:
        """Send a chat completion request through the provider's adapter.

        Args:
            provider_id: Target provider id.
            messages: Conversation as [{"role": ..., "content": ...}, ...].
            temperature: Sampling temperature.
            max_tokens: Response token budget.
            **kwargs: Passed through to the adapter.

        Returns:
            AdapterResponse; ``success`` is False on any failure.
        """
        adapter = await cls.get_adapter(provider_id)
        if adapter is None:
            return AdapterResponse(
                success=False,
                error=f"AI接口不存在或未启用: {provider_id}"
            )

        # Rough token estimate (~4 chars per token) for the rate limiter.
        estimated_tokens = sum(len(m.get("content", "")) for m in messages) // 4
        if not await rate_limiter.acquire_wait(provider_id, estimated_tokens):
            return AdapterResponse(
                success=False,
                error="请求频率超限,请稍后重试"
            )

        chat_messages = [
            ChatMessage(
                role=entry.get("role", "user"),
                content=entry.get("content", ""),
                name=entry.get("name")
            )
            for entry in messages
        ]

        return await adapter.chat(
            messages=chat_messages,
            temperature=temperature,
            max_tokens=max_tokens,
            **kwargs
        )

    @classmethod
    async def test_provider(cls, provider_id: str) -> Dict[str, Any]:
        """Run a connectivity test against a stored provider."""
        adapter = await cls.get_adapter(provider_id)
        if adapter is None:
            return {
                "success": False,
                "message": f"AI接口不存在或未启用: {provider_id}"
            }
        return await adapter.test_connection()

    @classmethod
    async def test_provider_config(
        cls,
        provider_type: str,
        api_key: str,
        base_url: str = "",
        model: str = "",
        use_proxy: bool = False,
        proxy_config: Optional[Dict[str, Any]] = None,
        timeout: int = 30,
        **kwargs
    ) -> Dict[str, Any]:
        """Test an ad-hoc provider configuration without persisting it.

        Args:
            provider_type: Adapter/provider type identifier.
            api_key: API key to test with (used as-is, not encrypted).
            base_url: Base URL of the API endpoint.
            model: Model name.
            use_proxy: Whether requests go through a proxy.
            proxy_config: Proxy settings, if any.
            timeout: Request timeout in seconds.
            **kwargs: Provider-specific extra parameters.

        Returns:
            The adapter's test_connection() result, or a failure dict
            when the provider type is unknown.
        """
        try:
            adapter_class = get_adapter(provider_type)
        except ValueError:
            return {
                "success": False,
                "message": f"不支持的提供商类型: {provider_type}"
            }

        probe = adapter_class(
            api_key=api_key,
            base_url=base_url,
            model=model,
            use_proxy=use_proxy,
            proxy_config=proxy_config,
            timeout=timeout,
            **kwargs
        )
        return await probe.test_connection()
# --- backend/services/chatroom_service.py ---
# Chat room service: lifecycle, membership, status and message history.

class ChatRoomService:
    """
    Chat room service.

    Manages chat room CRUD, agent membership, the discussion objective,
    status transitions and access to the room's message history.
    """

    @classmethod
    async def create_chatroom(
        cls,
        name: str,
        description: str = "",
        agents: Optional[List[str]] = None,
        moderator_agent_id: Optional[str] = None,
        config: Optional[Dict[str, Any]] = None
    ) -> ChatRoom:
        """Create a new chat room.

        Args:
            name: Display name of the room.
            description: Optional free-text description.
            agents: Ids of participating agents (validated to exist).
            moderator_agent_id: Optional moderator agent id (validated).
            config: Overrides merged onto the default room config.

        Returns:
            The inserted ChatRoom document.

        Raises:
            ValueError: If any referenced agent does not exist.
        """
        # Reject references to unknown agents early.
        if agents:
            found = await AgentService.get_agents_by_ids(agents)
            missing_ids = set(agents) - {a.agent_id for a in found}
            if missing_ids:
                raise ValueError(f"Agent不存在: {', '.join(missing_ids)}")

        if moderator_agent_id:
            if not await AgentService.get_agent(moderator_agent_id):
                raise ValueError(f"主持人Agent不存在: {moderator_agent_id}")

        room_id = f"room-{uuid.uuid4().hex[:8]}"

        # Defaults, overridden by any caller-supplied config keys.
        room_config = {
            "max_rounds": 50,
            "message_history_size": 20,
            "consensus_threshold": 0.8,
            "round_interval": 1.0,
            "allow_user_interrupt": True,
        }
        room_config.update(config or {})

        chatroom = ChatRoom(
            room_id=room_id,
            name=name,
            description=description,
            objective="",
            agents=agents or [],
            moderator_agent_id=moderator_agent_id,
            config=room_config,
            status=ChatRoomStatus.IDLE.value,
            current_round=0,
            created_at=datetime.utcnow(),
            updated_at=datetime.utcnow()
        )
        await chatroom.insert()

        logger.info(f"创建聊天室: {room_id} ({name})")
        return chatroom

    @classmethod
    async def get_chatroom(cls, room_id: str) -> Optional[ChatRoom]:
        """Return the chat room with the given id, or None."""
        return await ChatRoom.find_one(ChatRoom.room_id == room_id)

    @classmethod
    async def get_all_chatrooms(cls) -> List[ChatRoom]:
        """Return every chat room."""
        return await ChatRoom.find_all().to_list()

    @classmethod
    async def update_chatroom(
        cls,
        room_id: str,
        **kwargs
    ) -> Optional[ChatRoom]:
        """Update arbitrary chat room fields.

        Agent lists and the moderator id are validated before being
        applied; unknown attribute names are silently ignored.

        Returns:
            The updated ChatRoom, or None when the id is unknown.

        Raises:
            ValueError: If a referenced agent does not exist.
        """
        chatroom = await cls.get_chatroom(room_id)
        if chatroom is None:
            return None

        if "agents" in kwargs:
            found = await AgentService.get_agents_by_ids(kwargs["agents"])
            missing_ids = set(kwargs["agents"]) - {a.agent_id for a in found}
            if missing_ids:
                raise ValueError(f"Agent不存在: {', '.join(missing_ids)}")

        if kwargs.get("moderator_agent_id"):
            if not await AgentService.get_agent(kwargs["moderator_agent_id"]):
                raise ValueError(f"主持人Agent不存在: {kwargs['moderator_agent_id']}")

        kwargs["updated_at"] = datetime.utcnow()
        for attr, value in kwargs.items():
            if hasattr(chatroom, attr):
                setattr(chatroom, attr, value)

        await chatroom.save()

        logger.info(f"更新聊天室: {room_id}")
        return chatroom

    @classmethod
    async def delete_chatroom(cls, room_id: str) -> bool:
        """Delete a chat room together with all of its messages.

        Returns:
            True when a room was found and removed, else False.
        """
        chatroom = await cls.get_chatroom(room_id)
        if chatroom is None:
            return False

        # Remove the room's message history first.
        await Message.find(Message.room_id == room_id).delete()
        await chatroom.delete()

        logger.info(f"删除聊天室: {room_id}")
        return True

    @classmethod
    async def add_agent(cls, room_id: str, agent_id: str) -> Optional[ChatRoom]:
        """Add an agent to the room (no-op when already a member).

        Raises:
            ValueError: If the agent does not exist.
        """
        chatroom = await cls.get_chatroom(room_id)
        if chatroom is None:
            return None

        if not await AgentService.get_agent(agent_id):
            raise ValueError(f"Agent不存在: {agent_id}")

        if agent_id not in chatroom.agents:
            chatroom.agents.append(agent_id)
            chatroom.updated_at = datetime.utcnow()
            await chatroom.save()

        return chatroom

    @classmethod
    async def remove_agent(cls, room_id: str, agent_id: str) -> Optional[ChatRoom]:
        """Remove an agent from the room (no-op when not a member)."""
        chatroom = await cls.get_chatroom(room_id)
        if chatroom is None:
            return None

        if agent_id in chatroom.agents:
            chatroom.agents.remove(agent_id)
            chatroom.updated_at = datetime.utcnow()
            await chatroom.save()

        return chatroom

    @classmethod
    async def set_objective(
        cls,
        room_id: str,
        objective: str
    ) -> Optional[ChatRoom]:
        """Set the room's discussion objective."""
        return await cls.update_chatroom(room_id, objective=objective)

    @classmethod
    async def update_status(
        cls,
        room_id: str,
        status: ChatRoomStatus
    ) -> Optional[ChatRoom]:
        """Transition the room to a new status.

        COMPLETED additionally stamps ``completed_at``.

        Returns:
            The updated ChatRoom, or None when the id is unknown.
        """
        chatroom = await cls.get_chatroom(room_id)
        if chatroom is None:
            return None

        chatroom.status = status.value
        chatroom.updated_at = datetime.utcnow()
        if status == ChatRoomStatus.COMPLETED:
            chatroom.completed_at = datetime.utcnow()

        await chatroom.save()

        logger.info(f"聊天室状态更新: {room_id} -> {status.value}")
        return chatroom

    @classmethod
    async def increment_round(cls, room_id: str) -> Optional[ChatRoom]:
        """Advance the room's round counter by one."""
        chatroom = await cls.get_chatroom(room_id)
        if chatroom is None:
            return None

        chatroom.current_round += 1
        chatroom.updated_at = datetime.utcnow()
        await chatroom.save()

        return chatroom

    @classmethod
    async def get_messages(
        cls,
        room_id: str,
        limit: int = 50,
        skip: int = 0,
        discussion_id: Optional[str] = None
    ) -> List[Message]:
        """Return the room's messages, newest first.

        Args:
            room_id: Room to query.
            limit: Page size.
            skip: Offset for pagination.
            discussion_id: Optional filter to a single discussion.
        """
        criteria: Dict[str, Any] = {"room_id": room_id}
        if discussion_id:
            criteria["discussion_id"] = discussion_id

        return await (
            Message.find(criteria)
            .sort("-created_at")
            .skip(skip)
            .limit(limit)
            .to_list()
        )

    @classmethod
    async def get_recent_messages(
        cls,
        room_id: str,
        count: int = 20,
        discussion_id: Optional[str] = None
    ) -> List[Message]:
        """Return the last ``count`` messages in chronological order."""
        newest_first = await cls.get_messages(
            room_id,
            limit=count,
            discussion_id=discussion_id
        )
        # get_messages() sorts newest first; flip to chronological order.
        return list(reversed(newest_first))
# --- backend/services/consensus_manager.py ---
# Consensus manager: decides whether a discussion has reached consensus.

class ConsensusManager:
    """
    Consensus manager.

    Uses the moderator agent's model to judge whether participants have
    converged and to summarise the outcome of a discussion.
    """

    # Prompt template for the consensus-judgement call (``.format`` style;
    # the literal JSON braces are escaped as ``{{``/``}}``).
    CONSENSUS_PROMPT = """你是讨论的主持人,负责判断讨论是否达成共识。

讨论目标:{objective}

对话历史:
{history}

请仔细分析对话内容,判断:
1. 参与者是否对核心问题达成一致意见?
2. 是否还有重要分歧未解决?
3. 讨论结果是否足够明确和可执行?

请以JSON格式回复(不要包含任何其他文字):
{{
    "consensus_reached": true或false,
    "confidence": 0到1之间的数字,
    "summary": "讨论结果摘要,简洁概括达成的共识或当前状态",
    "action_items": ["具体的行动项列表"],
    "unresolved_issues": ["未解决的问题列表"],
    "key_decisions": ["关键决策列表"]
}}

注意:
- consensus_reached为true表示核心问题已有明确结论
- confidence表示你对共识判断的信心程度
- 如果讨论仍有争议或不够深入,应该返回false
- action_items应该是具体可执行的任务
- 请确保返回有效的JSON格式"""

    @classmethod
    async def check_consensus(
        cls,
        moderator: Agent,
        context: "DiscussionContext",
        chatroom: ChatRoom
    ) -> Dict[str, Any]:
        """Judge whether the discussion has reached consensus.

        Args:
            moderator: Moderator agent whose provider runs the judgement.
            context: Current discussion context (transcript etc.).
            chatroom: The chat room being moderated.

        Returns:
            A normalised consensus dict with keys consensus_reached,
            confidence, summary, action_items, unresolved_issues and
            key_decisions. Falls back to a "no consensus" result on
            any model/parsing failure.
        """
        from services.discussion_engine import DiscussionContext  # avoid circular import

        # Flatten the transcript; system lines are labelled explicitly.
        transcript_parts = []
        for msg in context.messages:
            speaker = msg.agent_id if msg.agent_id else "系统"
            transcript_parts.append(f"[{speaker}]: {msg.content}\n\n")
        history_text = "".join(transcript_parts)

        if not history_text:
            return {
                "consensus_reached": False,
                "confidence": 0,
                "summary": "讨论尚未开始",
                "action_items": [],
                "unresolved_issues": [],
                "key_decisions": []
            }

        prompt = cls.CONSENSUS_PROMPT.format(
            objective=context.objective,
            history=history_text
        )

        try:
            # Low temperature for a stable, reproducible judgement.
            response = await AIProviderService.chat(
                provider_id=moderator.provider_id,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.3,
                max_tokens=1000
            )

            if not response.success:
                logger.error(f"共识判断失败: {response.error}")
                return cls._default_result("AI接口调用失败")

            content = response.content.strip()

            try:
                parsed = json.loads(content)
            except json.JSONDecodeError:
                # The model may wrap the JSON in prose; grab the
                # outermost brace-delimited span and retry.
                import re
                json_match = re.search(r'\{[\s\S]*\}', content)
                if not json_match:
                    return cls._default_result("AI响应格式错误")
                try:
                    parsed = json.loads(json_match.group())
                except json.JSONDecodeError:
                    logger.warning(f"无法解析共识判断结果: {content}")
                    return cls._default_result("无法解析AI响应")

            return cls._normalize_result(parsed)

        except Exception as e:
            logger.error(f"共识判断异常: {e}")
            return cls._default_result(str(e))

    @classmethod
    async def generate_summary(
        cls,
        moderator: Agent,
        context: "DiscussionContext"
    ) -> str:
        """Produce a short natural-language summary of the discussion.

        Only agent messages are included; system lines are skipped.
        Returns a fixed error string when the model call fails.
        """
        from services.discussion_engine import DiscussionContext  # avoid circular import

        history_text = ""
        for msg in context.messages:
            if msg.agent_id:
                history_text += f"[{msg.agent_id}]: {msg.content}\n\n"

        prompt = f"""请为以下讨论生成一份简洁的摘要。

讨论目标:{context.objective}

对话记录:
{history_text}

请提供:
1. 讨论的主要观点和结论
2. 参与者的立场和建议
3. 最终的决策或共识(如果有)

摘要应该简洁明了,控制在300字以内。"""

        try:
            response = await AIProviderService.chat(
                provider_id=moderator.provider_id,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.5,
                max_tokens=500
            )
            if response.success:
                return response.content.strip()
            return "无法生成摘要"
        except Exception as e:
            logger.error(f"生成摘要异常: {e}")
            return "生成摘要时发生错误"

    @classmethod
    def _default_result(cls, error: str = "") -> Dict[str, Any]:
        """Fallback consensus result used when judgement fails.

        Args:
            error: Optional error text used as the summary.
        """
        return {
            "consensus_reached": False,
            "confidence": 0,
            "summary": error or "共识判断失败",
            "action_items": [],
            "unresolved_issues": [],
            "key_decisions": []
        }

    @classmethod
    def _normalize_result(cls, result: Dict[str, Any]) -> Dict[str, Any]:
        """Coerce a raw model answer into the canonical result shape."""
        confidence = float(result.get("confidence", 0))
        return {
            "consensus_reached": bool(result.get("consensus_reached", False)),
            # Clamp confidence into [0, 1].
            "confidence": min(1, max(0, confidence)),
            "summary": str(result.get("summary", "")),
            "action_items": list(result.get("action_items", [])),
            "unresolved_issues": list(result.get("unresolved_issues", [])),
            "key_decisions": list(result.get("key_decisions", []))
        }
file mode 100644 index 0000000..78723eb --- /dev/null +++ b/backend/services/discussion_engine.py @@ -0,0 +1,589 @@ +""" +讨论引擎 +实现自由讨论的核心逻辑 +""" +import uuid +import asyncio +from datetime import datetime +from typing import List, Dict, Any, Optional +from dataclasses import dataclass, field +from loguru import logger + +from models.chatroom import ChatRoom, ChatRoomStatus +from models.agent import Agent +from models.message import Message, MessageType +from models.discussion_result import DiscussionResult +from services.ai_provider_service import AIProviderService +from services.agent_service import AgentService +from services.chatroom_service import ChatRoomService +from services.message_router import MessageRouter +from services.consensus_manager import ConsensusManager + + +@dataclass +class DiscussionContext: + """讨论上下文""" + discussion_id: str + room_id: str + objective: str + current_round: int = 0 + messages: List[Message] = field(default_factory=list) + agent_speak_counts: Dict[str, int] = field(default_factory=dict) + + def add_message(self, message: Message) -> None: + """添加消息到上下文""" + self.messages.append(message) + if message.agent_id: + self.agent_speak_counts[message.agent_id] = \ + self.agent_speak_counts.get(message.agent_id, 0) + 1 + + def get_recent_messages(self, count: int = 20) -> List[Message]: + """获取最近的消息""" + return self.messages[-count:] if len(self.messages) > count else self.messages + + def get_agent_speak_count(self, agent_id: str) -> int: + """获取Agent在当前轮次的发言次数""" + return self.agent_speak_counts.get(agent_id, 0) + + def reset_round_counts(self) -> None: + """重置轮次发言计数""" + self.agent_speak_counts.clear() + + +class DiscussionEngine: + """ + 讨论引擎 + 实现多Agent自由讨论的核心逻辑 + """ + + # 活跃的讨论: room_id -> DiscussionContext + _active_discussions: Dict[str, DiscussionContext] = {} + + # 停止信号 + _stop_signals: Dict[str, bool] = {} + + @classmethod + async def start_discussion( + cls, + room_id: str, + objective: str + ) -> 
class DiscussionEngine:
    """
    Discussion engine.

    Drives multi-agent free discussion: start/stop/pause control, the
    per-round speaking loop, consensus checks via the moderator, and
    persistence of the final DiscussionResult.
    """

    # Active discussions: room_id -> DiscussionContext
    _active_discussions: Dict[str, DiscussionContext] = {}

    # Cooperative stop flags: room_id -> bool
    _stop_signals: Dict[str, bool] = {}

    @classmethod
    async def start_discussion(
        cls,
        room_id: str,
        objective: str
    ) -> Optional[DiscussionResult]:
        """Start a discussion in the given room and run it to completion.

        Args:
            room_id: Target chat room id.
            objective: Discussion goal (must be non-empty).

        Returns:
            The persisted DiscussionResult.

        Raises:
            ValueError: If the room is missing or has no agents, the
                objective is empty, or a discussion is already running.
        """
        chatroom = await ChatRoomService.get_chatroom(room_id)
        if not chatroom:
            raise ValueError(f"聊天室不存在: {room_id}")
        if not chatroom.agents:
            raise ValueError("聊天室没有Agent参与")
        if not objective:
            raise ValueError("讨论目标不能为空")
        if room_id in cls._active_discussions:
            raise ValueError("聊天室已有进行中的讨论")

        discussion_id = f"disc-{uuid.uuid4().hex[:8]}"

        # Persist an in-progress result record up front.
        discussion_result = DiscussionResult(
            discussion_id=discussion_id,
            room_id=room_id,
            objective=objective,
            status="in_progress",
            created_at=datetime.utcnow(),
            updated_at=datetime.utcnow()
        )
        await discussion_result.insert()

        context = DiscussionContext(
            discussion_id=discussion_id,
            room_id=room_id,
            objective=objective
        )
        cls._active_discussions[room_id] = context
        cls._stop_signals[room_id] = False

        await ChatRoomService.update_chatroom(
            room_id,
            status=ChatRoomStatus.ACTIVE.value,
            objective=objective,
            current_discussion_id=discussion_id,
            current_round=0
        )

        await MessageRouter.broadcast_status(room_id, "discussion_started", {
            "discussion_id": discussion_id,
            "objective": objective
        })

        await MessageRouter.save_and_broadcast_message(
            room_id=room_id,
            discussion_id=discussion_id,
            agent_id=None,
            content=f"讨论开始\n\n目标:{objective}",
            message_type=MessageType.SYSTEM.value,
            round_num=0
        )

        logger.info(f"讨论开始: {room_id} - {objective}")

        try:
            return await cls._run_discussion_loop(chatroom, context)
        except Exception as e:
            logger.error(f"讨论异常: {e}")
            await cls._handle_discussion_error(room_id, discussion_id, str(e))
            raise
        finally:
            # Always release the room, even on error.
            cls._active_discussions.pop(room_id, None)
            cls._stop_signals.pop(room_id, None)

    @classmethod
    async def stop_discussion(cls, room_id: str) -> bool:
        """Ask the active discussion to stop at its next round check.

        Returns:
            True if an active discussion was signalled, else False.
        """
        if room_id not in cls._active_discussions:
            return False

        cls._stop_signals[room_id] = True
        logger.info(f"收到停止讨论信号: {room_id}")
        return True

    @classmethod
    async def pause_discussion(cls, room_id: str) -> bool:
        """Pause the active discussion (the loop idles while paused)."""
        if room_id not in cls._active_discussions:
            return False

        await ChatRoomService.update_status(room_id, ChatRoomStatus.PAUSED)
        await MessageRouter.broadcast_status(room_id, "discussion_paused")

        logger.info(f"讨论暂停: {room_id}")
        return True

    @classmethod
    async def resume_discussion(cls, room_id: str) -> bool:
        """Resume a paused discussion."""
        chatroom = await ChatRoomService.get_chatroom(room_id)
        if not chatroom or chatroom.status != ChatRoomStatus.PAUSED.value:
            return False

        await ChatRoomService.update_status(room_id, ChatRoomStatus.ACTIVE)
        await MessageRouter.broadcast_status(room_id, "discussion_resumed")

        logger.info(f"讨论恢复: {room_id}")
        return True

    @classmethod
    async def _run_discussion_loop(
        cls,
        chatroom: ChatRoom,
        context: DiscussionContext
    ) -> DiscussionResult:
        """Run discussion rounds until consensus, silence, stop or limit.

        FIX: the silence counter is now reset whenever ANY agent spoke in
        the round. Previously it was only reset when a moderator was also
        configured, so moderator-less rooms terminated with
        "no_more_discussion" after three rounds even while agents were
        actively speaking.
        """
        room_id = chatroom.room_id
        config = chatroom.get_config()

        agents = await AgentService.get_agents_by_ids(chatroom.agents)
        agent_map = {a.agent_id: a for a in agents}

        # The moderator (if any) performs periodic consensus checks.
        moderator = None
        if chatroom.moderator_agent_id:
            moderator = await AgentService.get_agent(chatroom.moderator_agent_id)

        consecutive_no_speak = 0  # rounds in a row where nobody spoke

        while context.current_round < config.max_rounds:
            if cls._stop_signals.get(room_id, False):
                break

            # Idle while paused (the stop signal is still honoured above).
            current_chatroom = await ChatRoomService.get_chatroom(room_id)
            if current_chatroom and current_chatroom.status == ChatRoomStatus.PAUSED.value:
                await asyncio.sleep(1)
                continue

            context.current_round += 1
            context.reset_round_counts()

            await MessageRouter.broadcast_round_info(
                room_id,
                context.current_round,
                config.max_rounds
            )
            await ChatRoomService.update_chatroom(
                room_id,
                current_round=context.current_round
            )

            round_has_message = False

            for agent_id in chatroom.agents:
                agent = agent_map.get(agent_id)
                if not agent or not agent.enabled:
                    continue

                # Enforce the per-round speak quota.
                behavior = agent.get_behavior()
                if context.get_agent_speak_count(agent_id) >= behavior.max_speak_per_round:
                    continue

                should_speak, content = await cls._should_agent_speak(
                    agent, context, chatroom
                )

                if should_speak and content:
                    await MessageRouter.broadcast_typing(room_id, agent_id, True)

                    message = await MessageRouter.save_and_broadcast_message(
                        room_id=room_id,
                        discussion_id=context.discussion_id,
                        agent_id=agent_id,
                        content=content,
                        message_type=MessageType.TEXT.value,
                        round_num=context.current_round
                    )

                    context.add_message(message)
                    round_has_message = True

                    await MessageRouter.broadcast_typing(room_id, agent_id, False)

            await asyncio.sleep(config.round_interval)

            if round_has_message:
                # FIX: reset regardless of moderator presence.
                consecutive_no_speak = 0

                # Periodic consensus check requires a moderator.
                if moderator and (
                    context.current_round % 3 == 0
                    or context.current_round >= config.max_rounds - 5
                ):
                    consensus_result = await ConsensusManager.check_consensus(
                        moderator, context, chatroom
                    )
                    if consensus_result.get("consensus_reached", False):
                        if consensus_result.get("confidence", 0) >= config.consensus_threshold:
                            return await cls._finalize_discussion(
                                context,
                                consensus_result,
                                "consensus"
                            )
            else:
                consecutive_no_speak += 1

                # Three silent rounds in a row: wrap up.
                if consecutive_no_speak >= 3:
                    if moderator:
                        consensus_result = await ConsensusManager.check_consensus(
                            moderator, context, chatroom
                        )
                        return await cls._finalize_discussion(
                            context,
                            consensus_result,
                            "no_more_discussion"
                        )
                    return await cls._finalize_discussion(
                        context,
                        {"consensus_reached": False, "summary": "讨论结束,无明确共识"},
                        "no_more_discussion"
                    )

        # Loop exhausted (max rounds) or a stop was requested.
        # NOTE(review): a stop request is also reported as "max_rounds";
        # consider a distinct end reason.
        if moderator:
            consensus_result = await ConsensusManager.check_consensus(
                moderator, context, chatroom
            )
        else:
            consensus_result = {"consensus_reached": False, "summary": "达到最大轮次限制"}

        return await cls._finalize_discussion(
            context,
            consensus_result,
            "max_rounds"
        )

    @classmethod
    async def _should_agent_speak(
        cls,
        agent: Agent,
        context: DiscussionContext,
        chatroom: ChatRoom
    ) -> tuple[bool, str]:
        """Ask the agent's model whether it wants to speak this round.

        Returns:
            (True, content) when the agent speaks, else (False, "").
            The model signals silence by answering "PASS".
        """
        recent = context.get_recent_messages(
            chatroom.get_config().message_history_size
        )

        history_text = ""
        for msg in recent:
            speaker = msg.agent_id if msg.agent_id else "系统"
            history_text += f"[{speaker}]: {msg.content}\n\n"

        prompt = f"""你是{agent.name},角色是{agent.role}。

{agent.system_prompt}

当前讨论目标:{context.objective}

对话历史:
{history_text if history_text else "(还没有对话)"}

当前是第{context.current_round}轮讨论。

请根据你的角色判断:
1. 你是否有新的观点或建议要分享?
2. 你是否需要回应其他人的观点?
3. 当前讨论是否需要你的专业意见?

如果你认为需要发言,请直接给出你的发言内容。
如果你认为暂时不需要发言(例如等待更多信息、当前轮次已有足够讨论、或者你的观点已经充分表达),请只回复"PASS"。

注意:
- 请保持发言简洁有力,每次发言控制在200字以内
- 避免重复已经说过的内容
- 如果已经达成共识或接近共识,可以选择PASS"""

        try:
            response = await AIProviderService.chat(
                provider_id=agent.provider_id,
                messages=[{"role": "user", "content": prompt}],
                temperature=agent.temperature,
                max_tokens=agent.max_tokens
            )

            if not response.success:
                logger.warning(f"Agent {agent.agent_id} 响应失败: {response.error}")
                return False, ""

            content = response.content.strip()

            # "PASS" (any casing, possibly followed by text) means silence;
            # startswith subsumes the exact-match case.
            if content.upper().startswith("PASS"):
                return False, ""

            return True, content

        except Exception as e:
            logger.error(f"Agent {agent.agent_id} 判断发言异常: {e}")
            return False, ""

    @classmethod
    async def _finalize_discussion(
        cls,
        context: DiscussionContext,
        consensus_result: Dict[str, Any],
        end_reason: str
    ) -> DiscussionResult:
        """Persist the outcome, announce it and mark the room completed.

        FIX: agent contributions are recomputed from the full transcript.
        ``context.agent_speak_counts`` is cleared every round by
        ``reset_round_counts()``, so the previous code persisted only the
        final round's counts.

        Args:
            context: The finished discussion's context.
            consensus_result: Output of the consensus check.
            end_reason: "consensus" / "no_more_discussion" / "max_rounds".
        """
        room_id = context.room_id

        discussion_result = await DiscussionResult.find_one(
            DiscussionResult.discussion_id == context.discussion_id
        )

        if discussion_result:
            # FIX: total per-agent message counts over the whole discussion.
            contributions: Dict[str, int] = {}
            for msg in context.messages:
                if msg.agent_id:
                    contributions[msg.agent_id] = contributions.get(msg.agent_id, 0) + 1

            discussion_result.update_stats(
                total_rounds=context.current_round,
                total_messages=len(context.messages),
                agent_contributions=contributions
            )
            discussion_result.mark_completed(
                consensus_reached=consensus_result.get("consensus_reached", False),
                confidence=consensus_result.get("confidence", 0),
                summary=consensus_result.get("summary", ""),
                action_items=consensus_result.get("action_items", []),
                unresolved_issues=consensus_result.get("unresolved_issues", []),
                end_reason=end_reason
            )
            await discussion_result.save()

        await ChatRoomService.update_status(room_id, ChatRoomStatus.COMPLETED)

        summary_text = f"""讨论结束

结果:{"达成共识" if consensus_result.get("consensus_reached") else "未达成明确共识"}
置信度:{consensus_result.get("confidence", 0):.0%}

摘要:{consensus_result.get("summary", "无")}

行动项:
{chr(10).join("- " + item for item in consensus_result.get("action_items", [])) or "无"}

未解决问题:
{chr(10).join("- " + issue for issue in consensus_result.get("unresolved_issues", [])) or "无"}

共进行 {context.current_round} 轮讨论,产生 {len(context.messages)} 条消息。"""

        await MessageRouter.save_and_broadcast_message(
            room_id=room_id,
            discussion_id=context.discussion_id,
            agent_id=None,
            content=summary_text,
            message_type=MessageType.SYSTEM.value,
            round_num=context.current_round
        )

        await MessageRouter.broadcast_status(room_id, "discussion_completed", {
            "discussion_id": context.discussion_id,
            "consensus_reached": consensus_result.get("consensus_reached", False),
            "end_reason": end_reason
        })

        logger.info(f"讨论结束: {room_id}, 原因: {end_reason}")

        return discussion_result

    @classmethod
    async def _handle_discussion_error(
        cls,
        room_id: str,
        discussion_id: str,
        error: str
    ) -> None:
        """Mark the room and result as failed and broadcast the error."""
        await ChatRoomService.update_status(room_id, ChatRoomStatus.ERROR)

        discussion_result = await DiscussionResult.find_one(
            DiscussionResult.discussion_id == discussion_id
        )
        if discussion_result:
            discussion_result.status = "failed"
            discussion_result.end_reason = f"error: {error}"
            discussion_result.updated_at = datetime.utcnow()
            await discussion_result.save()

        await MessageRouter.broadcast_error(room_id, error)

    @classmethod
    def get_active_discussion(cls, room_id: str) -> Optional[DiscussionContext]:
        """Return the room's live discussion context, if any."""
        return cls._active_discussions.get(room_id)

    @classmethod
    def is_discussion_active(cls, room_id: str) -> bool:
        """Whether a discussion is currently running in the room."""
        return room_id in cls._active_discussions
class MCPService:
    """
    MCP tool service.

    Discovers MCP servers on disk, loads their tool descriptors from
    ``tools/*.json`` files, and tracks which tools each agent is allowed
    to use. All registries are class-level in-memory dictionaries.
    """

    # Directory that holds MCP server configurations.
    MCP_CONFIG_DIR = Path(os.getenv("CURSOR_MCP_DIR", "~/.cursor/mcps")).expanduser()

    # Registered tools: server_name -> list of tool-info dicts.
    _registered_tools: Dict[str, List[Dict[str, Any]]] = {}

    # Agent tool grants: agent_id -> list of "server/tool" names.
    _agent_tools: Dict[str, List[str]] = {}

    @classmethod
    async def initialize(cls) -> None:
        """Scan the MCP config directory and register every available server."""
        logger.info("初始化MCP服务...")

        if not cls.MCP_CONFIG_DIR.exists():
            logger.warning(f"MCP配置目录不存在: {cls.MCP_CONFIG_DIR}")
            return

        for entry in cls.MCP_CONFIG_DIR.iterdir():
            if entry.is_dir():
                await cls._scan_server(entry)

        logger.info(f"MCP服务初始化完成,已注册 {len(cls._registered_tools)} 个服务器")

    @classmethod
    async def _scan_server(cls, server_dir: Path) -> None:
        """
        Load every JSON tool descriptor below *server_dir*.

        Args:
            server_dir: one server's configuration directory.
        """
        name = server_dir.name
        tools_dir = server_dir / "tools"
        if not tools_dir.exists():
            return

        loaded: List[Dict[str, Any]] = []
        for descriptor in tools_dir.glob("*.json"):
            try:
                with open(descriptor, "r", encoding="utf-8") as fh:
                    info = json.load(fh)
                # Remember where the descriptor came from for debugging.
                info["_file"] = str(descriptor)
            except Exception as exc:
                logger.warning(f"加载MCP工具配置失败: {descriptor} - {exc}")
            else:
                loaded.append(info)

        if loaded:
            cls._registered_tools[name] = loaded
            logger.debug(f"注册MCP服务器: {name}, 工具数: {len(loaded)}")

    @classmethod
    def list_servers(cls) -> List[str]:
        """Return the names of all registered MCP servers."""
        return [*cls._registered_tools]

    @classmethod
    def list_tools(cls, server: Optional[str] = None) -> List[Dict[str, Any]]:
        """
        List available MCP tools.

        Args:
            server: server name; when omitted, tools from every server are
                returned, each copy annotated with its "server" key.

        Returns:
            Tool-info dicts.
        """
        if server:
            return cls._registered_tools.get(server, [])

        flattened: List[Dict[str, Any]] = []
        for srv, tools in cls._registered_tools.items():
            # Shallow-copy each tool so callers can't mutate the registry.
            flattened.extend(dict(tool, server=srv) for tool in tools)
        return flattened

    @classmethod
    def get_tool(cls, server: str, tool_name: str) -> Optional[Dict[str, Any]]:
        """
        Look up a single tool by server and name.

        Returns:
            The tool-info dict, or None when unknown.
        """
        return next(
            (
                candidate
                for candidate in cls._registered_tools.get(server, [])
                if candidate.get("name") == tool_name
            ),
            None,
        )

    @classmethod
    async def call_tool(
        cls,
        server: str,
        tool_name: str,
        arguments: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Invoke an MCP tool.

        Args:
            server: server name.
            tool_name: tool name.
            arguments: tool arguments.

        Returns:
            A result dict with a "success" flag.
        """
        tool = cls.get_tool(server, tool_name)
        if tool is None:
            return {
                "success": False,
                "error": f"工具不存在: {server}/{tool_name}"
            }

        # TODO: real MCP-protocol invocation; a simulated result is
        # returned until that is implemented.
        logger.info(f"调用MCP工具: {server}/{tool_name}, 参数: {arguments}")

        return {
            "success": True,
            "result": f"MCP工具调用: {tool_name}",
            "tool": tool_name,
            "server": server,
            "arguments": arguments
        }

    @classmethod
    def register_tool_for_agent(
        cls,
        agent_id: str,
        tool_name: str
    ) -> bool:
        """
        Grant an agent access to a tool.

        Args:
            agent_id: agent ID.
            tool_name: qualified name in "server/tool_name" form.

        Returns:
            True when newly granted, False when already present.
        """
        grants = cls._agent_tools.setdefault(agent_id, [])
        if tool_name in grants:
            return False
        grants.append(tool_name)
        return True

    @classmethod
    def unregister_tool_for_agent(
        cls,
        agent_id: str,
        tool_name: str
    ) -> bool:
        """
        Revoke an agent's access to a tool.

        Returns:
            True when the grant existed and was removed.
        """
        try:
            cls._agent_tools[agent_id].remove(tool_name)
        except (KeyError, ValueError):
            return False
        return True

    @classmethod
    def get_agent_tools(cls, agent_id: str) -> List[str]:
        """Return the qualified tool names granted to *agent_id*."""
        return cls._agent_tools.get(agent_id, [])

    @classmethod
    def get_tools_for_prompt(cls, agent_id: str) -> str:
        """
        Render the agent's tools as a prompt snippet.

        Returns:
            A "you may use these tools" text block, or "" when the agent
            has no resolvable tools.
        """
        lines: List[str] = []
        for full_name in cls.get_agent_tools(agent_id):
            server, sep, tool_name = full_name.partition("/")
            if not sep:
                continue  # malformed grant without a "/" separator
            info = cls.get_tool(server, tool_name)
            if info is not None:
                lines.append(f"- {tool_name}: {info.get('description', '无描述')}")

        if not lines:
            return ""

        return "你可以使用以下工具:\n" + "\n".join(lines)
class MemoryService:
    """
    Agent memory service.

    Stores, searches and maintains ``AgentMemory`` documents. Semantic
    search uses sentence-transformer embeddings when the model loads,
    and falls back to substring matching otherwise.
    """

    # Embedding model singleton, loaded lazily on first use.
    _embedding_model = None

    @classmethod
    def _get_embedding_model(cls):
        """Return the shared SentenceTransformer instance, or None when unavailable."""
        if cls._embedding_model is None:
            try:
                from sentence_transformers import SentenceTransformer
                cls._embedding_model = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2')
                logger.info("嵌入模型加载成功")
            except Exception as e:
                logger.warning(f"嵌入模型加载失败: {e}")
                return None
        return cls._embedding_model

    @staticmethod
    def _make_summary(content: str) -> str:
        """Truncate *content* to a 100-char summary (shared by create/update)."""
        return content[:100] + "..." if len(content) > 100 else content

    @classmethod
    async def create_memory(
        cls,
        agent_id: str,
        content: str,
        memory_type: Optional[str] = None,
        importance: float = 0.5,
        source_room_id: Optional[str] = None,
        source_discussion_id: Optional[str] = None,
        tags: Optional[List[str]] = None,
        expires_in_hours: Optional[int] = None
    ) -> "AgentMemory":
        """
        Create and persist a new memory.

        Args:
            agent_id: owning agent's ID.
            content: memory content.
            memory_type: memory type; defaults to MemoryType.SHORT_TERM
                when omitted (resolved lazily so importing this class does
                not require the enum at definition time).
            importance: importance score in [0, 1].
            source_room_id: originating chat room, if any.
            source_discussion_id: originating discussion, if any.
            tags: free-form tags.
            expires_in_hours: TTL in hours; None means no expiry.

        Returns:
            The inserted AgentMemory document.
        """
        if memory_type is None:
            memory_type = MemoryType.SHORT_TERM.value

        memory_id = f"mem-{uuid.uuid4().hex[:12]}"

        # Vector embedding for semantic search ([] when no model available).
        embedding = await cls._generate_embedding(content)

        summary = cls._make_summary(content)

        expires_at = None
        if expires_in_hours:
            expires_at = datetime.utcnow() + timedelta(hours=expires_in_hours)

        memory = AgentMemory(
            memory_id=memory_id,
            agent_id=agent_id,
            memory_type=memory_type,
            content=content,
            summary=summary,
            embedding=embedding,
            importance=importance,
            source_room_id=source_room_id,
            source_discussion_id=source_discussion_id,
            tags=tags or [],
            created_at=datetime.utcnow(),
            last_accessed=datetime.utcnow(),
            expires_at=expires_at
        )

        await memory.insert()

        logger.debug(f"创建记忆: {memory_id} for Agent {agent_id}")
        return memory

    @classmethod
    async def get_memory(cls, memory_id: str) -> Optional["AgentMemory"]:
        """
        Fetch a memory by its ID.

        Returns:
            The AgentMemory document, or None.
        """
        return await AgentMemory.find_one(AgentMemory.memory_id == memory_id)

    @classmethod
    async def get_agent_memories(
        cls,
        agent_id: str,
        memory_type: Optional[str] = None,
        limit: int = 50
    ) -> List["AgentMemory"]:
        """
        List an agent's memories, most important / most recently used first.

        Args:
            agent_id: agent ID.
            memory_type: optional type filter.
            limit: maximum number of documents.

        Returns:
            List of AgentMemory documents.
        """
        query = {"agent_id": agent_id}
        if memory_type:
            query["memory_type"] = memory_type

        return await AgentMemory.find(query).sort(
            "-importance", "-last_accessed"
        ).limit(limit).to_list()

    @classmethod
    async def search_memories(
        cls,
        agent_id: str,
        query: str,
        limit: int = 10,
        memory_type: Optional[str] = None,
        min_relevance: float = 0.3
    ) -> List[Dict[str, Any]]:
        """
        Search an agent's memories by semantic similarity.

        Falls back to substring matching when no embedding can be produced
        for the query. Accessed memories have their usage stats updated.

        Args:
            agent_id: agent ID.
            query: query text.
            limit: maximum number of results.
            memory_type: optional type filter.
            min_relevance: minimum relevance threshold.

        Returns:
            Dicts of {"memory", "similarity", "relevance"}, best first.
        """
        query_embedding = await cls._generate_embedding(query)
        if not query_embedding:
            # No embedding model — fall back to text matching.
            return await cls._text_search(agent_id, query, limit, memory_type)

        filter_query = {"agent_id": agent_id}
        if memory_type:
            filter_query["memory_type"] = memory_type

        memories = await AgentMemory.find(filter_query).to_list()

        results = []
        for memory in memories:
            if memory.is_expired():
                continue

            if memory.embedding:
                similarity = cls._cosine_similarity(query_embedding, memory.embedding)
                relevance = memory.calculate_relevance_score(similarity)

                if relevance >= min_relevance:
                    results.append({
                        "memory": memory,
                        "similarity": similarity,
                        "relevance": relevance
                    })

        results.sort(key=lambda x: x["relevance"], reverse=True)

        # Record the access on the returned memories only.
        for item in results[:limit]:
            memory = item["memory"]
            memory.access()
            await memory.save()

        return results[:limit]

    @classmethod
    async def update_memory(
        cls,
        memory_id: str,
        **kwargs
    ) -> Optional["AgentMemory"]:
        """
        Update fields of a memory.

        When "content" changes, the embedding and summary are regenerated.

        Returns:
            The updated AgentMemory, or None when not found.
        """
        memory = await cls.get_memory(memory_id)
        if not memory:
            return None

        if "content" in kwargs:
            kwargs["embedding"] = await cls._generate_embedding(kwargs["content"])
            kwargs["summary"] = cls._make_summary(kwargs["content"])

        for key, value in kwargs.items():
            if hasattr(memory, key):
                setattr(memory, key, value)

        await memory.save()
        return memory

    @classmethod
    async def delete_memory(cls, memory_id: str) -> bool:
        """
        Delete a single memory.

        Returns:
            True when a document was found and deleted.
        """
        memory = await cls.get_memory(memory_id)
        if not memory:
            return False

        await memory.delete()
        return True

    @classmethod
    async def delete_agent_memories(
        cls,
        agent_id: str,
        memory_type: Optional[str] = None
    ) -> int:
        """
        Delete an agent's memories, optionally filtered by type.

        Returns:
            Number of deleted documents.
        """
        query = {"agent_id": agent_id}
        if memory_type:
            query["memory_type"] = memory_type

        result = await AgentMemory.find(query).delete()
        return result.deleted_count if result else 0

    @classmethod
    async def cleanup_expired_memories(cls) -> int:
        """
        Remove every memory whose expires_at has passed.

        Returns:
            Number of deleted documents.
        """
        now = datetime.utcnow()
        result = await AgentMemory.find(
            {"expires_at": {"$lt": now}}
        ).delete()

        count = result.deleted_count if result else 0
        if count > 0:
            logger.info(f"清理了 {count} 条过期记忆")

        return count

    @classmethod
    async def consolidate_memories(
        cls,
        agent_id: str,
        min_importance: float = 0.7,
        max_age_days: int = 30
    ) -> None:
        """
        Promote important, old short-term memories to long-term.

        Args:
            agent_id: agent ID.
            min_importance: minimum importance for promotion.
            max_age_days: memories older than this are candidates.
        """
        cutoff_date = datetime.utcnow() - timedelta(days=max_age_days)

        memories = await AgentMemory.find({
            "agent_id": agent_id,
            "memory_type": MemoryType.SHORT_TERM.value,
            "importance": {"$gte": min_importance},
            "created_at": {"$lt": cutoff_date}
        }).to_list()

        for memory in memories:
            memory.memory_type = MemoryType.LONG_TERM.value
            memory.expires_at = None  # long-term memories never expire
            await memory.save()

        if memories:
            logger.info(f"整合了 {len(memories)} 条记忆为长期记忆: Agent {agent_id}")

    @classmethod
    async def _generate_embedding(cls, text: str) -> List[float]:
        """
        Produce an embedding for *text*.

        Returns:
            The embedding as a list of floats, or [] when no model is
            available or encoding fails.
        """
        model = cls._get_embedding_model()
        if model is None:
            return []

        try:
            embedding = model.encode(text, convert_to_numpy=True)
            return embedding.tolist()
        except Exception as e:
            logger.warning(f"生成嵌入失败: {e}")
            return []

    @classmethod
    def _cosine_similarity(cls, vec1: List[float], vec2: List[float]) -> float:
        """
        Cosine similarity between two vectors, clamped to [0, 1].

        Returns 0.0 for empty vectors or on numeric failure.
        """
        if not vec1 or not vec2:
            return 0.0

        try:
            a = np.array(vec1)
            b = np.array(vec2)
            similarity = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
            return float(max(0, similarity))
        except Exception:
            return 0.0

    @classmethod
    async def _text_search(
        cls,
        agent_id: str,
        query: str,
        limit: int,
        memory_type: Optional[str]
    ) -> List[Dict[str, Any]]:
        """
        Substring-match fallback when embeddings are unavailable.

        Args:
            agent_id: agent ID.
            query: query text.
            limit: maximum number of results.
            memory_type: optional type filter.

        Returns:
            Dicts of {"memory", "similarity", "relevance"}, best first.
        """
        filter_query = {"agent_id": agent_id}
        if memory_type:
            filter_query["memory_type"] = memory_type

        memories = await AgentMemory.find(filter_query).to_list()

        results = []
        query_lower = query.lower()
        for memory in memories:
            if memory.is_expired():
                continue

            content_lower = memory.content.lower()
            # Fix: skip empty content — the score below would divide by zero.
            if not content_lower:
                continue

            if query_lower in content_lower:
                # Crude score: fraction of the content matched by the query.
                score = len(query_lower) / len(content_lower)
                results.append({
                    "memory": memory,
                    "similarity": score,
                    "relevance": score * memory.importance
                })

        results.sort(key=lambda x: x["relevance"], reverse=True)
        return results[:limit]
"""
Message routing service.

Maintains per-room WebSocket connections and fans chat events out to
every listener: persisted messages, status changes, typing indicators,
round information and errors.
"""
import uuid
import asyncio
from datetime import datetime
from typing import List, Dict, Any, Optional, Callable, Set
from dataclasses import dataclass, field
from loguru import logger
from fastapi import WebSocket

from models.message import Message, MessageType
from models.chatroom import ChatRoom


@dataclass
class WebSocketConnection:
    """Bookkeeping record for one accepted WebSocket."""
    websocket: WebSocket
    room_id: str
    connected_at: datetime = field(default_factory=datetime.utcnow)


class MessageRouter:
    """
    Message router.

    Class-level registry of room connections plus helpers that persist
    messages and broadcast them to everyone in a room.
    """

    # room_id -> set of live WebSockets attached to that room.
    _room_connections: Dict[str, Set[WebSocket]] = {}

    # External subscribers awaited for every persisted message.
    _message_callbacks: List[Callable] = []

    @classmethod
    async def connect(cls, room_id: str, websocket: WebSocket) -> None:
        """
        Accept *websocket* and attach it to *room_id*.

        Args:
            room_id: chat room ID.
            websocket: the incoming WebSocket.
        """
        await websocket.accept()
        cls._room_connections.setdefault(room_id, set()).add(websocket)
        logger.info(f"WebSocket连接建立: {room_id}, 当前连接数: {len(cls._room_connections[room_id])}")

    @classmethod
    async def disconnect(cls, room_id: str, websocket: WebSocket) -> None:
        """
        Detach *websocket* from *room_id*, dropping the room entry when
        it becomes empty.
        """
        sockets = cls._room_connections.get(room_id)
        if sockets is None:
            return
        sockets.discard(websocket)
        if not sockets:
            del cls._room_connections[room_id]
        logger.info(f"WebSocket连接断开: {room_id}")

    @classmethod
    async def broadcast_to_room(
        cls,
        room_id: str,
        message: Dict[str, Any]
    ) -> None:
        """
        Send *message* to every socket currently attached to *room_id*.

        Sends run concurrently; individual failures are swallowed
        (the failing socket is dropped by ``_send_message``).
        """
        sockets = cls._room_connections.get(room_id)
        if not sockets:
            return

        # Snapshot before awaiting: a failed send disconnects and mutates the set.
        await asyncio.gather(
            *(cls._send_message(room_id, ws, message) for ws in list(sockets)),
            return_exceptions=True,
        )

    @classmethod
    async def _send_message(
        cls,
        room_id: str,
        websocket: WebSocket,
        message: Dict[str, Any]
    ) -> None:
        """Send to one socket; on failure, remove it from the room."""
        try:
            await websocket.send_json(message)
        except Exception as exc:
            logger.warning(f"WebSocket发送失败: {exc}")
            await cls.disconnect(room_id, websocket)

    @classmethod
    async def save_and_broadcast_message(
        cls,
        room_id: str,
        discussion_id: str,
        agent_id: Optional[str],
        content: str,
        message_type: str = MessageType.TEXT.value,
        round_num: int = 0,
        attachments: Optional[List[Dict[str, Any]]] = None,
        tool_calls: Optional[List[Dict[str, Any]]] = None,
        tool_results: Optional[List[Dict[str, Any]]] = None
    ) -> Message:
        """
        Persist a message document, broadcast it to the room, then notify
        registered callbacks.

        Args:
            room_id: chat room ID.
            discussion_id: discussion ID.
            agent_id: sending agent's ID (None for system messages).
            content: message body.
            message_type: message type.
            round_num: discussion round number.
            attachments: optional attachments.
            tool_calls: optional tool invocations.
            tool_results: optional tool results.

        Returns:
            The saved Message document.
        """
        record = Message(
            message_id=f"msg-{uuid.uuid4().hex[:12]}",
            room_id=room_id,
            discussion_id=discussion_id,
            agent_id=agent_id,
            content=content,
            message_type=message_type,
            attachments=attachments or [],
            round=round_num,
            token_count=len(content) // 4,  # rough estimate: ~4 chars per token
            tool_calls=tool_calls or [],
            tool_results=tool_results or [],
            created_at=datetime.utcnow()
        )
        await record.insert()

        payload = {
            "type": "message",
            "data": {
                "message_id": record.message_id,
                "room_id": record.room_id,
                "discussion_id": record.discussion_id,
                "agent_id": record.agent_id,
                "content": record.content,
                "message_type": record.message_type,
                "round": record.round,
                "created_at": record.created_at.isoformat()
            }
        }
        await cls.broadcast_to_room(room_id, payload)

        for subscriber in cls._message_callbacks:
            try:
                await subscriber(record)
            except Exception as exc:
                logger.error(f"消息回调执行失败: {exc}")

        return record

    @classmethod
    async def broadcast_status(
        cls,
        room_id: str,
        status: str,
        data: Optional[Dict[str, Any]] = None
    ) -> None:
        """Broadcast a status-change event to the room."""
        await cls.broadcast_to_room(room_id, {
            "type": "status",
            "status": status,
            "data": data or {},
            "timestamp": datetime.utcnow().isoformat()
        })

    @classmethod
    async def broadcast_typing(
        cls,
        room_id: str,
        agent_id: str,
        is_typing: bool = True
    ) -> None:
        """Broadcast an agent's typing indicator to the room."""
        await cls.broadcast_to_room(room_id, {
            "type": "typing",
            "agent_id": agent_id,
            "is_typing": is_typing,
            "timestamp": datetime.utcnow().isoformat()
        })

    @classmethod
    async def broadcast_round_info(
        cls,
        room_id: str,
        round_num: int,
        total_rounds: int
    ) -> None:
        """Broadcast current/maximum round numbers to the room."""
        await cls.broadcast_to_room(room_id, {
            "type": "round",
            "round": round_num,
            "total_rounds": total_rounds,
            "timestamp": datetime.utcnow().isoformat()
        })

    @classmethod
    async def broadcast_error(
        cls,
        room_id: str,
        error: str,
        agent_id: Optional[str] = None
    ) -> None:
        """Broadcast an error event, optionally tied to an agent."""
        await cls.broadcast_to_room(room_id, {
            "type": "error",
            "error": error,
            "agent_id": agent_id,
            "timestamp": datetime.utcnow().isoformat()
        })

    @classmethod
    def register_callback(cls, callback: Callable) -> None:
        """
        Subscribe *callback* to persisted messages.

        Args:
            callback: awaitable taking the saved Message.
        """
        cls._message_callbacks.append(callback)

    @classmethod
    def unregister_callback(cls, callback: Callable) -> None:
        """Remove a previously registered callback (no-op when absent)."""
        if callback in cls._message_callbacks:
            cls._message_callbacks.remove(callback)

    @classmethod
    def get_connection_count(cls, room_id: str) -> int:
        """Return the number of sockets attached to *room_id*."""
        return len(cls._room_connections.get(room_id, set()))

    @classmethod
    def get_all_room_ids(cls) -> List[str]:
        """Return the IDs of all rooms with at least one connection."""
        return [*cls._room_connections]
Returns: + 连接数 + """ + return len(cls._room_connections.get(room_id, set())) + + @classmethod + def get_all_room_ids(cls) -> List[str]: + """ + 获取所有活跃房间ID + + Returns: + 房间ID列表 + """ + return list(cls._room_connections.keys()) diff --git a/backend/utils/__init__.py b/backend/utils/__init__.py new file mode 100644 index 0000000..70fb9d7 --- /dev/null +++ b/backend/utils/__init__.py @@ -0,0 +1,13 @@ +""" +工具函数模块 +""" +from .encryption import encrypt_api_key, decrypt_api_key +from .proxy_handler import get_http_client +from .rate_limiter import RateLimiter + +__all__ = [ + "encrypt_api_key", + "decrypt_api_key", + "get_http_client", + "RateLimiter", +] diff --git a/backend/utils/encryption.py b/backend/utils/encryption.py new file mode 100644 index 0000000..31f694f --- /dev/null +++ b/backend/utils/encryption.py @@ -0,0 +1,97 @@ +""" +加密工具模块 +用于API密钥的加密和解密 +""" +import base64 +from cryptography.fernet import Fernet +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC +from loguru import logger + +from config import settings + + +def _get_fernet() -> Fernet: + """ + 获取Fernet加密器实例 + 使用配置的加密密钥派生加密密钥 + + Returns: + Fernet加密器 + """ + # 使用PBKDF2从密钥派生32字节密钥 + salt = b"ai_chatroom_salt" # 固定salt,实际生产环境应使用随机salt + kdf = PBKDF2HMAC( + algorithm=hashes.SHA256(), + length=32, + salt=salt, + iterations=100000, + ) + key = base64.urlsafe_b64encode( + kdf.derive(settings.ENCRYPTION_KEY.encode()) + ) + return Fernet(key) + + +def encrypt_api_key(api_key: str) -> str: + """ + 加密API密钥 + + Args: + api_key: 原始API密钥 + + Returns: + 加密后的密钥字符串 + """ + if not api_key: + return "" + + try: + fernet = _get_fernet() + encrypted = fernet.encrypt(api_key.encode()) + return encrypted.decode() + except Exception as e: + logger.error(f"API密钥加密失败: {e}") + raise ValueError("加密失败") + + +def decrypt_api_key(encrypted_key: str) -> str: + """ + 解密API密钥 + + Args: + encrypted_key: 加密的密钥字符串 + + Returns: + 解密后的原始API密钥 + """ + if not encrypted_key: 
+ return "" + + try: + fernet = _get_fernet() + decrypted = fernet.decrypt(encrypted_key.encode()) + return decrypted.decode() + except Exception as e: + logger.error(f"API密钥解密失败: {e}") + raise ValueError("解密失败,密钥可能已损坏或被篡改") + + +def mask_api_key(api_key: str, visible_chars: int = 4) -> str: + """ + 掩码API密钥,用于安全显示 + + Args: + api_key: 原始API密钥 + visible_chars: 末尾可见字符数 + + Returns: + 掩码后的密钥 (如: ****abc1) + """ + if not api_key: + return "" + + if len(api_key) <= visible_chars: + return "*" * len(api_key) + + return "*" * (len(api_key) - visible_chars) + api_key[-visible_chars:] diff --git a/backend/utils/proxy_handler.py b/backend/utils/proxy_handler.py new file mode 100644 index 0000000..9dfa5f2 --- /dev/null +++ b/backend/utils/proxy_handler.py @@ -0,0 +1,135 @@ +""" +代理处理模块 +处理HTTP代理配置 +""" +from typing import Optional, Dict, Any +import httpx +from loguru import logger + +from config import settings + + +def get_proxy_dict( + use_proxy: bool, + proxy_config: Optional[Dict[str, Any]] = None +) -> Optional[Dict[str, str]]: + """ + 获取代理配置字典 + + Args: + use_proxy: 是否使用代理 + proxy_config: 代理配置 + + Returns: + 代理配置字典或None + """ + if not use_proxy: + return None + + proxies = {} + + if proxy_config: + http_proxy = proxy_config.get("http_proxy") + https_proxy = proxy_config.get("https_proxy") + else: + # 使用全局默认代理 + http_proxy = settings.DEFAULT_HTTP_PROXY + https_proxy = settings.DEFAULT_HTTPS_PROXY + + if http_proxy: + proxies["http://"] = http_proxy + if https_proxy: + proxies["https://"] = https_proxy + + return proxies if proxies else None + + +def get_http_client( + use_proxy: bool = False, + proxy_config: Optional[Dict[str, Any]] = None, + timeout: int = 60, + **kwargs +) -> httpx.AsyncClient: + """ + 获取配置好的HTTP异步客户端 + + Args: + use_proxy: 是否使用代理 + proxy_config: 代理配置 + timeout: 超时时间(秒) + **kwargs: 其他httpx参数 + + Returns: + 配置好的httpx.AsyncClient实例 + """ + proxies = get_proxy_dict(use_proxy, proxy_config) + + client_kwargs = { + "timeout": httpx.Timeout(timeout), + 
"follow_redirects": True, + **kwargs + } + + if proxies: + client_kwargs["proxies"] = proxies + logger.debug(f"HTTP客户端使用代理: {proxies}") + + return httpx.AsyncClient(**client_kwargs) + + +async def test_proxy_connection( + proxy_config: Dict[str, Any], + test_url: str = "https://www.google.com" +) -> Dict[str, Any]: + """ + 测试代理连接是否可用 + + Args: + proxy_config: 代理配置 + test_url: 测试URL + + Returns: + 测试结果字典,包含 success, message, latency_ms + """ + try: + async with get_http_client( + use_proxy=True, + proxy_config=proxy_config, + timeout=10 + ) as client: + import time + start = time.time() + response = await client.get(test_url) + latency = (time.time() - start) * 1000 + + if response.status_code == 200: + return { + "success": True, + "message": "代理连接正常", + "latency_ms": round(latency, 2) + } + else: + return { + "success": False, + "message": f"代理返回状态码: {response.status_code}", + "latency_ms": round(latency, 2) + } + + except httpx.ProxyError as e: + return { + "success": False, + "message": f"代理连接失败: {str(e)}", + "latency_ms": None + } + except httpx.TimeoutException: + return { + "success": False, + "message": "代理连接超时", + "latency_ms": None + } + except Exception as e: + return { + "success": False, + "message": f"连接错误: {str(e)}", + "latency_ms": None + } diff --git a/backend/utils/rate_limiter.py b/backend/utils/rate_limiter.py new file mode 100644 index 0000000..798dcf3 --- /dev/null +++ b/backend/utils/rate_limiter.py @@ -0,0 +1,233 @@ +""" +速率限制器模块 +使用令牌桶算法控制请求频率 +""" +import asyncio +import time +from typing import Dict, Optional +from dataclasses import dataclass, field +from loguru import logger + + +@dataclass +class TokenBucket: + """令牌桶""" + capacity: int # 桶容量 + tokens: float = field(init=False) # 当前令牌数 + refill_rate: float # 每秒填充速率 + last_refill: float = field(default_factory=time.time) + + def __post_init__(self): + self.tokens = float(self.capacity) + + def _refill(self) -> None: + """填充令牌""" + now = time.time() + elapsed = now - self.last_refill + 
self.tokens = min( + self.capacity, + self.tokens + elapsed * self.refill_rate + ) + self.last_refill = now + + def consume(self, tokens: int = 1) -> bool: + """ + 尝试消费令牌 + + Args: + tokens: 要消费的令牌数 + + Returns: + 是否消费成功 + """ + self._refill() + if self.tokens >= tokens: + self.tokens -= tokens + return True + return False + + def wait_time(self, tokens: int = 1) -> float: + """ + 计算需要等待的时间 + + Args: + tokens: 需要的令牌数 + + Returns: + 需要等待的秒数 + """ + self._refill() + if self.tokens >= tokens: + return 0.0 + needed = tokens - self.tokens + return needed / self.refill_rate + + +class RateLimiter: + """ + 速率限制器 + 管理多个提供商的速率限制 + """ + + def __init__(self): + self._buckets: Dict[str, TokenBucket] = {} + self._locks: Dict[str, asyncio.Lock] = {} + + def register( + self, + provider_id: str, + requests_per_minute: int = 60, + tokens_per_minute: int = 100000 + ) -> None: + """ + 注册提供商的速率限制 + + Args: + provider_id: 提供商ID + requests_per_minute: 每分钟请求数 + tokens_per_minute: 每分钟token数 + """ + # 请求限制桶 + self._buckets[f"{provider_id}:requests"] = TokenBucket( + capacity=requests_per_minute, + refill_rate=requests_per_minute / 60.0 + ) + + # Token限制桶 + self._buckets[f"{provider_id}:tokens"] = TokenBucket( + capacity=tokens_per_minute, + refill_rate=tokens_per_minute / 60.0 + ) + + # 创建锁 + self._locks[provider_id] = asyncio.Lock() + + logger.debug( + f"注册速率限制: {provider_id} - " + f"{requests_per_minute}请求/分钟, " + f"{tokens_per_minute}tokens/分钟" + ) + + def unregister(self, provider_id: str) -> None: + """ + 取消注册提供商的速率限制 + + Args: + provider_id: 提供商ID + """ + self._buckets.pop(f"{provider_id}:requests", None) + self._buckets.pop(f"{provider_id}:tokens", None) + self._locks.pop(provider_id, None) + + async def acquire( + self, + provider_id: str, + estimated_tokens: int = 1 + ) -> bool: + """ + 获取请求许可(非阻塞) + + Args: + provider_id: 提供商ID + estimated_tokens: 预估token数 + + Returns: + 是否获取成功 + """ + request_bucket = self._buckets.get(f"{provider_id}:requests") + token_bucket = 
self._buckets.get(f"{provider_id}:tokens") + + if not request_bucket or not token_bucket: + # 未注册,默认允许 + return True + + lock = self._locks.get(provider_id) + if lock: + async with lock: + if request_bucket.consume(1) and token_bucket.consume(estimated_tokens): + return True + + return False + + async def acquire_wait( + self, + provider_id: str, + estimated_tokens: int = 1, + max_wait: float = 60.0 + ) -> bool: + """ + 获取请求许可(阻塞等待) + + Args: + provider_id: 提供商ID + estimated_tokens: 预估token数 + max_wait: 最大等待时间(秒) + + Returns: + 是否获取成功 + """ + request_bucket = self._buckets.get(f"{provider_id}:requests") + token_bucket = self._buckets.get(f"{provider_id}:tokens") + + if not request_bucket or not token_bucket: + return True + + lock = self._locks.get(provider_id) + if not lock: + return True + + start_time = time.time() + + while True: + async with lock: + # 计算需要等待的时间 + request_wait = request_bucket.wait_time(1) + token_wait = token_bucket.wait_time(estimated_tokens) + wait_time = max(request_wait, token_wait) + + if wait_time == 0: + request_bucket.consume(1) + token_bucket.consume(estimated_tokens) + return True + + # 检查是否超时 + elapsed = time.time() - start_time + if elapsed + wait_time > max_wait: + logger.warning( + f"速率限制等待超时: {provider_id}, " + f"需要等待{wait_time:.2f}秒" + ) + return False + + # 在锁外等待 + await asyncio.sleep(min(wait_time, 1.0)) + + def get_status(self, provider_id: str) -> Optional[Dict[str, any]]: + """ + 获取提供商的速率限制状态 + + Args: + provider_id: 提供商ID + + Returns: + 状态字典 + """ + request_bucket = self._buckets.get(f"{provider_id}:requests") + token_bucket = self._buckets.get(f"{provider_id}:tokens") + + if not request_bucket or not token_bucket: + return None + + request_bucket._refill() + token_bucket._refill() + + return { + "requests_remaining": int(request_bucket.tokens), + "requests_capacity": request_bucket.capacity, + "tokens_remaining": int(token_bucket.tokens), + "tokens_capacity": token_bucket.capacity + } + + +# 全局速率限制器实例 +rate_limiter = 
RateLimiter() diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..e400a7d --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,70 @@ +version: '3.8' + +services: + # MongoDB数据库 + mongodb: + image: mongo:7 + container_name: ai-chatroom-mongodb + restart: unless-stopped + ports: + - "27017:27017" + volumes: + - mongodb_data:/data/db + environment: + MONGO_INITDB_ROOT_USERNAME: admin + MONGO_INITDB_ROOT_PASSWORD: ${MONGO_PASSWORD:-chatroom123} + networks: + - chatroom-network + healthcheck: + test: echo 'db.runCommand("ping").ok' | mongosh localhost:27017/test --quiet + interval: 10s + timeout: 5s + retries: 5 + + # 后端服务 + backend: + build: + context: ./backend + dockerfile: Dockerfile + container_name: ai-chatroom-backend + restart: unless-stopped + ports: + - "8000:8000" + depends_on: + mongodb: + condition: service_healthy + environment: + - MONGODB_URL=mongodb://admin:${MONGO_PASSWORD:-chatroom123}@mongodb:27017 + - MONGODB_DB=ai_chatroom + - HOST=0.0.0.0 + - PORT=8000 + - DEBUG=${DEBUG:-false} + - SECRET_KEY=${SECRET_KEY:-change-this-in-production} + - ENCRYPTION_KEY=${ENCRYPTION_KEY:-your-32-byte-encryption-key-here} + volumes: + - ./backend:/app + networks: + - chatroom-network + + # 前端服务 + frontend: + build: + context: ./frontend + dockerfile: Dockerfile + container_name: ai-chatroom-frontend + restart: unless-stopped + ports: + - "3000:80" + depends_on: + - backend + environment: + - VITE_API_URL=http://backend:8000 + networks: + - chatroom-network + +networks: + chatroom-network: + driver: bridge + +volumes: + mongodb_data: diff --git a/frontend/Dockerfile b/frontend/Dockerfile new file mode 100644 index 0000000..91a961c --- /dev/null +++ b/frontend/Dockerfile @@ -0,0 +1,31 @@ +# AI聊天室前端 Dockerfile +# 构建阶段 +FROM node:20-alpine as builder + +WORKDIR /app + +# 复制依赖文件 +COPY package.json . + +# 安装依赖 +RUN npm install + +# 复制源代码 +COPY . . 
+ +# 构建 +RUN npm run build + +# 生产阶段 +FROM nginx:alpine + +# 复制构建产物 +COPY --from=builder /app/dist /usr/share/nginx/html + +# 复制nginx配置 +COPY nginx.conf /etc/nginx/conf.d/default.conf + +# 暴露端口 +EXPOSE 80 + +CMD ["nginx", "-g", "daemon off;"] diff --git a/frontend/index.html b/frontend/index.html new file mode 100644 index 0000000..ae46307 --- /dev/null +++ b/frontend/index.html @@ -0,0 +1,13 @@ + + + + + + + AI聊天室 - 多Agent协作讨论平台 + + +
+ + + diff --git a/frontend/nginx.conf b/frontend/nginx.conf new file mode 100644 index 0000000..fc0c37a --- /dev/null +++ b/frontend/nginx.conf @@ -0,0 +1,46 @@ +server { + listen 80; + server_name localhost; + + root /usr/share/nginx/html; + index index.html; + + # 前端路由支持 + location / { + try_files $uri $uri/ /index.html; + } + + # API代理 + location /api { + proxy_pass http://backend:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_read_timeout 86400; + } + + # WebSocket代理 + location /ws { + proxy_pass http://backend:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_read_timeout 86400; + } + + # 静态资源缓存 + location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg)$ { + expires 30d; + add_header Cache-Control "public, immutable"; + } + + # Gzip压缩 + gzip on; + gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript; + gzip_min_length 1000; +} diff --git a/frontend/package-lock.json b/frontend/package-lock.json new file mode 100644 index 0000000..857d82c --- /dev/null +++ b/frontend/package-lock.json @@ -0,0 +1,3192 @@ +{ + "name": "ai-chatroom-frontend", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "ai-chatroom-frontend", + "version": "1.0.0", + "dependencies": { + "@ant-design/icons": "^5.2.6", + "antd": "^5.13.0", + "axios": "^1.6.5", + "dayjs": "^1.11.10", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-router-dom": "^6.21.1", + "socket.io-client": "^4.7.4", + "zustand": "^4.4.7" + }, + "devDependencies": { + "@types/react": "^18.2.47", + "@types/react-dom": "^18.2.18", + 
"@vitejs/plugin-react": "^4.2.1", + "typescript": "^5.3.3", + "vite": "^5.0.11" + } + }, + "node_modules/@ant-design/colors": { + "version": "7.2.1", + "resolved": "https://registry.npmmirror.com/@ant-design/colors/-/colors-7.2.1.tgz", + "integrity": "sha512-lCHDcEzieu4GA3n8ELeZ5VQ8pKQAWcGGLRTQ50aQM2iqPpq2evTxER84jfdPvsPAtEcZ7m44NI45edFMo8oOYQ==", + "license": "MIT", + "dependencies": { + "@ant-design/fast-color": "^2.0.6" + } + }, + "node_modules/@ant-design/cssinjs": { + "version": "1.24.0", + "resolved": "https://registry.npmmirror.com/@ant-design/cssinjs/-/cssinjs-1.24.0.tgz", + "integrity": "sha512-K4cYrJBsgvL+IoozUXYjbT6LHHNt+19a9zkvpBPxLjFHas1UpPM2A5MlhROb0BT8N8WoavM5VsP9MeSeNK/3mg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "@emotion/hash": "^0.8.0", + "@emotion/unitless": "^0.7.5", + "classnames": "^2.3.1", + "csstype": "^3.1.3", + "rc-util": "^5.35.0", + "stylis": "^4.3.4" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/@ant-design/cssinjs-utils": { + "version": "1.1.3", + "resolved": "https://registry.npmmirror.com/@ant-design/cssinjs-utils/-/cssinjs-utils-1.1.3.tgz", + "integrity": "sha512-nOoQMLW1l+xR1Co8NFVYiP8pZp3VjIIzqV6D6ShYF2ljtdwWJn5WSsH+7kvCktXL/yhEtWURKOfH5Xz/gzlwsg==", + "license": "MIT", + "dependencies": { + "@ant-design/cssinjs": "^1.21.0", + "@babel/runtime": "^7.23.2", + "rc-util": "^5.38.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@ant-design/fast-color": { + "version": "2.0.6", + "resolved": "https://registry.npmmirror.com/@ant-design/fast-color/-/fast-color-2.0.6.tgz", + "integrity": "sha512-y2217gk4NqL35giHl72o6Zzqji9O7vHh9YmhUVkPtAOpoTCH4uWxo/pr4VE8t0+ChEPs0qo4eJRC5Q1eXWo3vA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.24.7" + }, + "engines": { + "node": ">=8.x" + } + }, + "node_modules/@ant-design/icons": { + "version": "5.6.1", + "resolved": 
"https://registry.npmmirror.com/@ant-design/icons/-/icons-5.6.1.tgz", + "integrity": "sha512-0/xS39c91WjPAZOWsvi1//zjx6kAp4kxWwctR6kuU6p133w8RU0D2dSCvZC19uQyharg/sAvYxGYWl01BbZZfg==", + "license": "MIT", + "dependencies": { + "@ant-design/colors": "^7.0.0", + "@ant-design/icons-svg": "^4.4.0", + "@babel/runtime": "^7.24.8", + "classnames": "^2.2.6", + "rc-util": "^5.31.1" + }, + "engines": { + "node": ">=8" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/@ant-design/icons-svg": { + "version": "4.4.2", + "resolved": "https://registry.npmmirror.com/@ant-design/icons-svg/-/icons-svg-4.4.2.tgz", + "integrity": "sha512-vHbT+zJEVzllwP+CM+ul7reTEfBR0vgxFe7+lREAsAA7YGsYpboiq2sQNeQeRvh09GfQgs/GyFEvZpJ9cLXpXA==", + "license": "MIT" + }, + "node_modules/@ant-design/react-slick": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/@ant-design/react-slick/-/react-slick-1.1.2.tgz", + "integrity": "sha512-EzlvzE6xQUBrZuuhSAFTdsr4P2bBBHGZwKFemEfq8gIGyIQCxalYfZW/T2ORbtQx5rU69o+WycP3exY/7T1hGA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.4", + "classnames": "^2.2.5", + "json2mq": "^0.2.0", + "resize-observer-polyfill": "^1.5.1", + "throttle-debounce": "^5.0.0" + }, + "peerDependencies": { + "react": ">=16.9.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmmirror.com/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmmirror.com/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": 
"sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmmirror.com/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.0", + "resolved": "https://registry.npmmirror.com/@babel/generator/-/generator-7.29.0.tgz", + "integrity": "sha512-vSH118/wwM/pLR38g/Sgk05sNtro6TlTJKuiMXDaZqPUfjTFcudpCOt00IhOfj+1BFAX+UFAlzCU+6WXr3GLFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmmirror.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "dev": true, + "license": "MIT", + "dependencies": { 
+ "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmmirror.com/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmmirror.com/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmmirror.com/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmmirror.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + 
"node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmmirror.com/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmmirror.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmmirror.com/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.28.6", + "resolved": "https://registry.npmmirror.com/@babel/helpers/-/helpers-7.28.6.tgz", + "integrity": "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.0", + "resolved": "https://registry.npmmirror.com/@babel/parser/-/parser-7.29.0.tgz", + "integrity": "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } 
+ }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmmirror.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.28.6", + "resolved": "https://registry.npmmirror.com/@babel/runtime/-/runtime-7.28.6.tgz", + "integrity": "sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmmirror.com/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmmirror.com/@babel/traverse/-/traverse-7.29.0.tgz", + 
"integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmmirror.com/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@emotion/hash": { + "version": "0.8.0", + "resolved": "https://registry.npmmirror.com/@emotion/hash/-/hash-0.8.0.tgz", + "integrity": "sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow==", + "license": "MIT" + }, + "node_modules/@emotion/unitless": { + "version": "0.7.5", + "resolved": "https://registry.npmmirror.com/@emotion/unitless/-/unitless-0.7.5.tgz", + "integrity": "sha512-OWORNpfjMsSSUBVrRBVGECkhWcULOAJz9ZW8uK9qgxD+87M7jHRcvh/A96XXNhXTLmKcoYSQtBEX7lHMO7YRwg==", + "license": "MIT" + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": 
"https://registry.npmmirror.com/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { 
+ "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + 
"dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": 
"sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": 
"https://registry.npmmirror.com/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmmirror.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmmirror.com/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + 
} + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmmirror.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmmirror.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmmirror.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@rc-component/async-validator": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/@rc-component/async-validator/-/async-validator-5.1.0.tgz", + "integrity": "sha512-n4HcR5siNUXRX23nDizbZBQPO0ZM/5oTtmKZ6/eqL0L2bo747cklFdZGRN2f+c9qWGICwDzrhW0H7tE9PptdcA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.24.4" + }, + "engines": { + "node": ">=14.x" + } + }, + "node_modules/@rc-component/color-picker": { + "version": "2.0.1", + "resolved": "https://registry.npmmirror.com/@rc-component/color-picker/-/color-picker-2.0.1.tgz", + "integrity": "sha512-WcZYwAThV/b2GISQ8F+7650r5ZZJ043E57aVBFkQ+kSY4C6wdofXgB0hBx+GPGpIU0Z81eETNoDUJMr7oy/P8Q==", + "license": "MIT", + "dependencies": { + "@ant-design/fast-color": "^2.0.6", + "@babel/runtime": "^7.23.6", + "classnames": "^2.2.6", + "rc-util": "^5.38.1" + }, + "peerDependencies": { 
+ "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/context": { + "version": "1.4.0", + "resolved": "https://registry.npmmirror.com/@rc-component/context/-/context-1.4.0.tgz", + "integrity": "sha512-kFcNxg9oLRMoL3qki0OMxK+7g5mypjgaaJp/pkOis/6rVxma9nJBF/8kCIuTYHUQNr0ii7MxqE33wirPZLJQ2w==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/mini-decimal": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/@rc-component/mini-decimal/-/mini-decimal-1.1.0.tgz", + "integrity": "sha512-jS4E7T9Li2GuYwI6PyiVXmxTiM6b07rlD9Ge8uGZSCz3WlzcG5ZK7g5bbuKNeZ9pgUuPK/5guV781ujdVpm4HQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0" + }, + "engines": { + "node": ">=8.x" + } + }, + "node_modules/@rc-component/mutate-observer": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/@rc-component/mutate-observer/-/mutate-observer-1.1.0.tgz", + "integrity": "sha512-QjrOsDXQusNwGZPf4/qRQasg7UFEj06XiCJ8iuiq/Io7CrHrgVi6Uuetw60WAMG1799v+aM8kyc+1L/GBbHSlw==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/portal": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/@rc-component/portal/-/portal-1.1.2.tgz", + "integrity": "sha512-6f813C0IsasTZms08kfA8kPAGxbbkYToa8ALaiDIGGECU4i9hj8Plgbx0sNJDrey3EtHO30hmdaxtT0138xZcg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/qrcode": { + 
"version": "1.1.1", + "resolved": "https://registry.npmmirror.com/@rc-component/qrcode/-/qrcode-1.1.1.tgz", + "integrity": "sha512-LfLGNymzKdUPjXUbRP+xOhIWY4jQ+YMj5MmWAcgcAq1Ij8XP7tRmAXqyuv96XvLUBE/5cA8hLFl9eO1JQMujrA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.24.7" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/tour": { + "version": "1.15.1", + "resolved": "https://registry.npmmirror.com/@rc-component/tour/-/tour-1.15.1.tgz", + "integrity": "sha512-Tr2t7J1DKZUpfJuDZWHxyxWpfmj8EZrqSgyMZ+BCdvKZ6r1UDsfU46M/iWAAFBy961Ssfom2kv5f3UcjIL2CmQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "@rc-component/portal": "^1.0.0-9", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.3.2", + "rc-util": "^5.24.4" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@rc-component/trigger": { + "version": "2.3.1", + "resolved": "https://registry.npmmirror.com/@rc-component/trigger/-/trigger-2.3.1.tgz", + "integrity": "sha512-ORENF39PeXTzM+gQEshuk460Z8N4+6DkjpxlpE7Q3gYy1iBpLrx0FOJz3h62ryrJZ/3zCAUIkT1Pb/8hHWpb3A==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.23.2", + "@rc-component/portal": "^1.1.0", + "classnames": "^2.3.2", + "rc-motion": "^2.0.0", + "rc-resize-observer": "^1.3.1", + "rc-util": "^5.44.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/@remix-run/router": { + "version": "1.23.2", + "resolved": "https://registry.npmmirror.com/@remix-run/router/-/router-1.23.2.tgz", + "integrity": "sha512-Ic6m2U/rMjTkhERIa/0ZtXJP17QUi2CbWE7cqx4J58M8aA3QTfW+2UlQ4psvTX9IO1RfNVhK3pcpdjej7L+t2w==", + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": 
"1.0.0-beta.27", + "resolved": "https://registry.npmmirror.com/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz", + "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz", + "integrity": "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.57.1.tgz", + "integrity": "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.57.1.tgz", + "integrity": "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.57.1.tgz", + "integrity": "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.57.1", + "resolved": 
"https://registry.npmmirror.com/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.57.1.tgz", + "integrity": "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.57.1.tgz", + "integrity": "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.57.1.tgz", + "integrity": "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.57.1.tgz", + "integrity": "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.57.1.tgz", + "integrity": "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + 
"node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.57.1.tgz", + "integrity": "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.57.1.tgz", + "integrity": "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.57.1.tgz", + "integrity": "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.57.1.tgz", + "integrity": "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.57.1.tgz", + "integrity": "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==", + "cpu": [ + "ppc64" 
+ ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.57.1.tgz", + "integrity": "sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.57.1.tgz", + "integrity": "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.57.1.tgz", + "integrity": "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.1.tgz", + "integrity": "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.1.tgz", + "integrity": 
"sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.57.1.tgz", + "integrity": "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.57.1.tgz", + "integrity": "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.57.1.tgz", + "integrity": "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.57.1.tgz", + "integrity": "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.57.1", + "resolved": 
"https://registry.npmmirror.com/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.57.1.tgz", + "integrity": "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.57.1.tgz", + "integrity": "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@socket.io/component-emitter": { + "version": "3.1.2", + "resolved": "https://registry.npmmirror.com/@socket.io/component-emitter/-/component-emitter-3.1.2.tgz", + "integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==", + "license": "MIT" + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmmirror.com/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmmirror.com/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": 
"https://registry.npmmirror.com/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmmirror.com/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmmirror.com/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/prop-types": { + "version": "15.7.15", + "resolved": "https://registry.npmmirror.com/@types/prop-types/-/prop-types-15.7.15.tgz", + "integrity": "sha512-F6bEyamV9jKGAFBEmlQnesRPGOQqS2+Uwi0Em15xenOxHaf2hv6L8YCVn3rPdPJOiJfPiCnLIRyvwVaqMY3MIw==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "18.3.27", + "resolved": "https://registry.npmmirror.com/@types/react/-/react-18.3.27.tgz", + "integrity": "sha512-cisd7gxkzjBKU2GgdYrTdtQx1SORymWyaAFhaxQPK9bYO9ot3Y5OikQRvY0VYQtvwjeQnizCINJAenh/V7MK2w==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.3.7", + "resolved": "https://registry.npmmirror.com/@types/react-dom/-/react-dom-18.3.7.tgz", + "integrity": "sha512-MEe3UeoENYVFXzoXEWsvcpg6ZvlrFNlOQ7EOsvhI3CfAXwzPfO8Qwuxd40nepsYKqyyVQnTdEfv68q91yLcKrQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + 
"@types/react": "^18.0.0" + } + }, + "node_modules/@vitejs/plugin-react": { + "version": "4.7.0", + "resolved": "https://registry.npmmirror.com/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz", + "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.0", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-beta.27", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.17.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/antd": { + "version": "5.29.3", + "resolved": "https://registry.npmmirror.com/antd/-/antd-5.29.3.tgz", + "integrity": "sha512-3DdbGCa9tWAJGcCJ6rzR8EJFsv2CtyEbkVabZE14pfgUHfCicWCj0/QzQVLDYg8CPfQk9BH7fHCoTXHTy7MP/A==", + "license": "MIT", + "dependencies": { + "@ant-design/colors": "^7.2.1", + "@ant-design/cssinjs": "^1.23.0", + "@ant-design/cssinjs-utils": "^1.1.3", + "@ant-design/fast-color": "^2.0.6", + "@ant-design/icons": "^5.6.1", + "@ant-design/react-slick": "~1.1.2", + "@babel/runtime": "^7.26.0", + "@rc-component/color-picker": "~2.0.1", + "@rc-component/mutate-observer": "^1.1.0", + "@rc-component/qrcode": "~1.1.0", + "@rc-component/tour": "~1.15.1", + "@rc-component/trigger": "^2.3.0", + "classnames": "^2.5.1", + "copy-to-clipboard": "^3.3.3", + "dayjs": "^1.11.11", + "rc-cascader": "~3.34.0", + "rc-checkbox": "~3.5.0", + "rc-collapse": "~3.9.0", + "rc-dialog": "~9.6.0", + "rc-drawer": "~7.3.0", + "rc-dropdown": "~4.2.1", + "rc-field-form": "~2.7.1", + "rc-image": "~7.12.0", + "rc-input": "~1.8.0", + "rc-input-number": "~9.5.0", + "rc-mentions": "~2.20.0", + "rc-menu": "~9.16.1", + "rc-motion": "^2.9.5", + "rc-notification": "~5.6.4", + "rc-pagination": "~5.1.0", + "rc-picker": "~4.11.3", + "rc-progress": 
"~4.0.0", + "rc-rate": "~2.13.1", + "rc-resize-observer": "^1.4.3", + "rc-segmented": "~2.7.0", + "rc-select": "~14.16.8", + "rc-slider": "~11.1.9", + "rc-steps": "~6.0.1", + "rc-switch": "~4.1.0", + "rc-table": "~7.54.0", + "rc-tabs": "~15.7.0", + "rc-textarea": "~1.10.2", + "rc-tooltip": "~6.4.0", + "rc-tree": "~5.13.1", + "rc-tree-select": "~5.27.0", + "rc-upload": "~4.11.0", + "rc-util": "^5.44.4", + "scroll-into-view-if-needed": "^3.1.0", + "throttle-debounce": "^5.0.2" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/ant-design" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmmirror.com/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, + "node_modules/axios": { + "version": "1.13.4", + "resolved": "https://registry.npmmirror.com/axios/-/axios-1.13.4.tgz", + "integrity": "sha512-1wVkUaAO6WyaYtCkcYCOx12ZgpGf9Zif+qXa4n+oYzK558YryKqiL6UWwd5DqiH3VRW0GYhTZQ/vlgJrCoNQlg==", + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.4", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/baseline-browser-mapping": { + "version": "2.9.19", + "resolved": "https://registry.npmmirror.com/baseline-browser-mapping/-/baseline-browser-mapping-2.9.19.tgz", + "integrity": "sha512-ipDqC8FrAl/76p2SSWKSI+H9tFwm7vYqXQrItCuiVPt26Km0jS+NzSsBWAaBusvSbQcfJG+JitdMm+wZAgTYqg==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmmirror.com/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + 
"type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001767", + "resolved": "https://registry.npmmirror.com/caniuse-lite/-/caniuse-lite-1.0.30001767.tgz", + "integrity": "sha512-34+zUAMhSH+r+9eKmYG+k2Rpt8XttfE4yXAjoZvkAPs15xcYQhyBYdalJ65BzivAvGRMViEjy6oKr/S91loekQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/classnames": { + "version": "2.5.1", + "resolved": "https://registry.npmmirror.com/classnames/-/classnames-2.5.1.tgz", + "integrity": "sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==", + "license": "MIT" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": 
"https://registry.npmmirror.com/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/compute-scroll-into-view": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/compute-scroll-into-view/-/compute-scroll-into-view-3.1.1.tgz", + "integrity": "sha512-VRhuHOLoKYOy4UbilLbUzbYg93XLjv2PncJC50EuTWPA3gaja1UjBsUP/D/9/juV3vQFr6XBEzn9KCAHdUvOHw==", + "license": "MIT" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/copy-to-clipboard": { + "version": "3.3.3", + "resolved": "https://registry.npmmirror.com/copy-to-clipboard/-/copy-to-clipboard-3.3.3.tgz", + "integrity": "sha512-2KV8NhB5JqC3ky0r9PMCAZKbUHSwtEo4CwCs0KXgruG43gX5PMqDEBbVU4OUzw2MuAWUfsuFmWvEKG5QRfSnJA==", + "license": "MIT", + "dependencies": { + "toggle-selection": "^1.0.6" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmmirror.com/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" + }, + "node_modules/dayjs": { + "version": "1.11.19", + "resolved": "https://registry.npmmirror.com/dayjs/-/dayjs-1.11.19.tgz", + "integrity": "sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==", + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmmirror.com/debug/-/debug-4.4.3.tgz", + "integrity": 
"sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.283", + "resolved": "https://registry.npmmirror.com/electron-to-chromium/-/electron-to-chromium-1.5.283.tgz", + "integrity": "sha512-3vifjt1HgrGW/h76UEeny+adYApveS9dH2h3p57JYzBSXJIKUJAvtmIytDKjcSCt9xHfrNCFJ7gts6vkhuq++w==", + "dev": true, + "license": "ISC" + }, + "node_modules/engine.io-client": { + "version": "6.6.4", + "resolved": "https://registry.npmmirror.com/engine.io-client/-/engine.io-client-6.6.4.tgz", + "integrity": "sha512-+kjUJnZGwzewFDw951CDWcwj35vMNf2fcj7xQWOctq1F2i1jkDdVvdFG9kM/BEChymCH36KgjnW0NsL58JYRxw==", + "license": "MIT", + "dependencies": { + "@socket.io/component-emitter": "~3.1.0", + "debug": "~4.4.1", + "engine.io-parser": "~5.2.1", + "ws": "~8.18.3", + "xmlhttprequest-ssl": "~2.1.1" + } + }, + "node_modules/engine.io-parser": { + "version": "5.2.3", + "resolved": "https://registry.npmmirror.com/engine.io-parser/-/engine.io-parser-5.2.3.tgz", + "integrity": 
"sha512-HqD3yTBfnBxIrbnM1DoD6Pcq8NECnh8d4As1Qgh0z5Gg3jRRIqijury0CL3ghu/edArpUYiYqQiDUQBIs4np3Q==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmmirror.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esbuild": { + "version": "0.21.5", + "resolved": "https://registry.npmmirror.com/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, 
+ "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmmirror.com/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmmirror.com/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "license": "MIT", + 
"dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmmirror.com/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmmirror.com/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": 
"https://registry.npmmirror.com/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json2mq": { + "version": "0.2.0", + "resolved": "https://registry.npmmirror.com/json2mq/-/json2mq-0.2.0.tgz", + "integrity": "sha512-SzoRg7ux5DWTII9J2qkrZrqV1gt+rTaoufMxEzXbS26Uid0NwaJd123HcoB80TgubEppxxIGdNxCx50fEoEWQA==", + "license": "MIT", + "dependencies": { + "string-convert": "^0.2.0" + } + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmmirror.com/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmmirror.com/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmmirror.com/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + 
"resolved": "https://registry.npmmirror.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmmirror.com/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmmirror.com/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmmirror.com/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmmirror.com/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + 
"version": "1.1.1", + "resolved": "https://registry.npmmirror.com/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmmirror.com/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" + }, + "node_modules/rc-cascader": { + "version": "3.34.0", + "resolved": "https://registry.npmmirror.com/rc-cascader/-/rc-cascader-3.34.0.tgz", + "integrity": "sha512-KpXypcvju9ptjW9FaN2NFcA2QH9E9LHKq169Y0eWtH4e/wHQ5Wh5qZakAgvb8EKZ736WZ3B0zLLOBsrsja5Dag==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.25.7", + "classnames": "^2.3.1", + "rc-select": "~14.16.2", + "rc-tree": "~5.13.0", + "rc-util": "^5.43.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-checkbox": { + "version": "3.5.0", + "resolved": "https://registry.npmmirror.com/rc-checkbox/-/rc-checkbox-3.5.0.tgz", + "integrity": "sha512-aOAQc3E98HteIIsSqm6Xk2FPKIER6+5vyEFMZfo73TqM+VVAIqOkHoPjgKLqSNtVLWScoaM7vY2ZrGEheI79yg==", 
+ "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.3.2", + "rc-util": "^5.25.2" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-collapse": { + "version": "3.9.0", + "resolved": "https://registry.npmmirror.com/rc-collapse/-/rc-collapse-3.9.0.tgz", + "integrity": "sha512-swDdz4QZ4dFTo4RAUMLL50qP0EY62N2kvmk2We5xYdRwcRn8WcYtuetCJpwpaCbUfUt5+huLpVxhvmnK+PHrkA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-motion": "^2.3.4", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-dialog": { + "version": "9.6.0", + "resolved": "https://registry.npmmirror.com/rc-dialog/-/rc-dialog-9.6.0.tgz", + "integrity": "sha512-ApoVi9Z8PaCQg6FsUzS8yvBEQy0ZL2PkuvAgrmohPkN3okps5WZ5WQWPc1RNuiOKaAYv8B97ACdsFU5LizzCqg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/portal": "^1.0.0-8", + "classnames": "^2.2.6", + "rc-motion": "^2.3.0", + "rc-util": "^5.21.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-drawer": { + "version": "7.3.0", + "resolved": "https://registry.npmmirror.com/rc-drawer/-/rc-drawer-7.3.0.tgz", + "integrity": "sha512-DX6CIgiBWNpJIMGFO8BAISFkxiuKitoizooj4BDyee8/SnBn0zwO2FHrNDpqqepj0E/TFTDpmEBCyFuTgC7MOg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.23.9", + "@rc-component/portal": "^1.1.1", + "classnames": "^2.2.6", + "rc-motion": "^2.6.1", + "rc-util": "^5.38.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-dropdown": { + "version": "4.2.1", + "resolved": "https://registry.npmmirror.com/rc-dropdown/-/rc-dropdown-4.2.1.tgz", + "integrity": "sha512-YDAlXsPv3I1n42dv1JpdM7wJ+gSUBfeyPK59ZpBD9jQhK9jVuxpjj3NmWQHOBceA1zEPVX84T2wbdb2SD0UjmA==", + "license": "MIT", + 
"dependencies": { + "@babel/runtime": "^7.18.3", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.2.6", + "rc-util": "^5.44.1" + }, + "peerDependencies": { + "react": ">=16.11.0", + "react-dom": ">=16.11.0" + } + }, + "node_modules/rc-field-form": { + "version": "2.7.1", + "resolved": "https://registry.npmmirror.com/rc-field-form/-/rc-field-form-2.7.1.tgz", + "integrity": "sha512-vKeSifSJ6HoLaAB+B8aq/Qgm8a3dyxROzCtKNCsBQgiverpc4kWDQihoUwzUj+zNWJOykwSY4dNX3QrGwtVb9A==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.0", + "@rc-component/async-validator": "^5.0.3", + "rc-util": "^5.32.2" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-image": { + "version": "7.12.0", + "resolved": "https://registry.npmmirror.com/rc-image/-/rc-image-7.12.0.tgz", + "integrity": "sha512-cZ3HTyyckPnNnUb9/DRqduqzLfrQRyi+CdHjdqgsyDpI3Ln5UX1kXnAhPBSJj9pVRzwRFgqkN7p9b6HBDjmu/Q==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.2", + "@rc-component/portal": "^1.0.2", + "classnames": "^2.2.6", + "rc-dialog": "~9.6.0", + "rc-motion": "^2.6.2", + "rc-util": "^5.34.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-input": { + "version": "1.8.0", + "resolved": "https://registry.npmmirror.com/rc-input/-/rc-input-1.8.0.tgz", + "integrity": "sha512-KXvaTbX+7ha8a/k+eg6SYRVERK0NddX8QX7a7AnRvUa/rEH0CNMlpcBzBkhI0wp2C8C4HlMoYl8TImSN+fuHKA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-util": "^5.18.1" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/rc-input-number": { + "version": "9.5.0", + "resolved": "https://registry.npmmirror.com/rc-input-number/-/rc-input-number-9.5.0.tgz", + "integrity": "sha512-bKaEvB5tHebUURAEXw35LDcnRZLq3x1k7GxfAqBMzmpHkDGzjAtnUL8y4y5N15rIFIg5IJgwr211jInl3cipag==", + 
"license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/mini-decimal": "^1.0.1", + "classnames": "^2.2.5", + "rc-input": "~1.8.0", + "rc-util": "^5.40.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-mentions": { + "version": "2.20.0", + "resolved": "https://registry.npmmirror.com/rc-mentions/-/rc-mentions-2.20.0.tgz", + "integrity": "sha512-w8HCMZEh3f0nR8ZEd466ATqmXFCMGMN5UFCzEUL0bM/nGw/wOS2GgRzKBcm19K++jDyuWCOJOdgcKGXU3fXfbQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.22.5", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.2.6", + "rc-input": "~1.8.0", + "rc-menu": "~9.16.0", + "rc-textarea": "~1.10.0", + "rc-util": "^5.34.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-menu": { + "version": "9.16.1", + "resolved": "https://registry.npmmirror.com/rc-menu/-/rc-menu-9.16.1.tgz", + "integrity": "sha512-ghHx6/6Dvp+fw8CJhDUHFHDJ84hJE3BXNCzSgLdmNiFErWSOaZNsihDAsKq9ByTALo/xkNIwtDFGIl6r+RPXBg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/trigger": "^2.0.0", + "classnames": "2.x", + "rc-motion": "^2.4.3", + "rc-overflow": "^1.3.1", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-motion": { + "version": "2.9.5", + "resolved": "https://registry.npmmirror.com/rc-motion/-/rc-motion-2.9.5.tgz", + "integrity": "sha512-w+XTUrfh7ArbYEd2582uDrEhmBHwK1ZENJiSJVb7uRxdE7qJSYjbO2eksRXmndqyKqKoYPc9ClpPh5242mV1vA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-util": "^5.44.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-notification": { + "version": "5.6.4", + "resolved": "https://registry.npmmirror.com/rc-notification/-/rc-notification-5.6.4.tgz", + "integrity": 
"sha512-KcS4O6B4qzM3KH7lkwOB7ooLPZ4b6J+VMmQgT51VZCeEcmghdeR4IrMcFq0LG+RPdnbe/ArT086tGM8Snimgiw==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-motion": "^2.9.0", + "rc-util": "^5.20.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-overflow": { + "version": "1.5.0", + "resolved": "https://registry.npmmirror.com/rc-overflow/-/rc-overflow-1.5.0.tgz", + "integrity": "sha512-Lm/v9h0LymeUYJf0x39OveU52InkdRXqnn2aYXfWmo8WdOonIKB2kfau+GF0fWq6jPgtdO9yMqveGcK6aIhJmg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.37.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-pagination": { + "version": "5.1.0", + "resolved": "https://registry.npmmirror.com/rc-pagination/-/rc-pagination-5.1.0.tgz", + "integrity": "sha512-8416Yip/+eclTFdHXLKTxZvn70duYVGTvUUWbckCCZoIl3jagqke3GLsFrMs0bsQBikiYpZLD9206Ej4SOdOXQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.3.2", + "rc-util": "^5.38.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-picker": { + "version": "4.11.3", + "resolved": "https://registry.npmmirror.com/rc-picker/-/rc-picker-4.11.3.tgz", + "integrity": "sha512-MJ5teb7FlNE0NFHTncxXQ62Y5lytq6sh5nUw0iH8OkHL/TjARSEvSHpr940pWgjGANpjCwyMdvsEV55l5tYNSg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.24.7", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.2.1", + "rc-overflow": "^1.3.2", + "rc-resize-observer": "^1.4.0", + "rc-util": "^5.43.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "date-fns": ">= 2.x", + "dayjs": ">= 1.x", + "luxon": ">= 3.x", + "moment": ">= 2.x", + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + }, 
+ "peerDependenciesMeta": { + "date-fns": { + "optional": true + }, + "dayjs": { + "optional": true + }, + "luxon": { + "optional": true + }, + "moment": { + "optional": true + } + } + }, + "node_modules/rc-progress": { + "version": "4.0.0", + "resolved": "https://registry.npmmirror.com/rc-progress/-/rc-progress-4.0.0.tgz", + "integrity": "sha512-oofVMMafOCokIUIBnZLNcOZFsABaUw8PPrf1/y0ZBvKZNpOiu5h4AO9vv11Sw0p4Hb3D0yGWuEattcQGtNJ/aw==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.6", + "rc-util": "^5.16.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-rate": { + "version": "2.13.1", + "resolved": "https://registry.npmmirror.com/rc-rate/-/rc-rate-2.13.1.tgz", + "integrity": "sha512-QUhQ9ivQ8Gy7mtMZPAjLbxBt5y9GRp65VcUyGUMF3N3fhiftivPHdpuDIaWIMOTEprAjZPC08bls1dQB+I1F2Q==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.5", + "rc-util": "^5.0.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-resize-observer": { + "version": "1.4.3", + "resolved": "https://registry.npmmirror.com/rc-resize-observer/-/rc-resize-observer-1.4.3.tgz", + "integrity": "sha512-YZLjUbyIWox8E9i9C3Tm7ia+W7euPItNWSPX5sCcQTYbnwDb5uNpnLHQCG1f22oZWUhLw4Mv2tFmeWe68CDQRQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.20.7", + "classnames": "^2.2.1", + "rc-util": "^5.44.1", + "resize-observer-polyfill": "^1.5.1" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-segmented": { + "version": "2.7.1", + "resolved": "https://registry.npmmirror.com/rc-segmented/-/rc-segmented-2.7.1.tgz", + "integrity": "sha512-izj1Nw/Dw2Vb7EVr+D/E9lUTkBe+kKC+SAFSU9zqr7WV2W5Ktaa9Gc7cB2jTqgk8GROJayltaec+DBlYKc6d+g==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": 
"^2.2.1", + "rc-motion": "^2.4.4", + "rc-util": "^5.17.0" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/rc-select": { + "version": "14.16.8", + "resolved": "https://registry.npmmirror.com/rc-select/-/rc-select-14.16.8.tgz", + "integrity": "sha512-NOV5BZa1wZrsdkKaiK7LHRuo5ZjZYMDxPP6/1+09+FB4KoNi8jcG1ZqLE3AVCxEsYMBe65OBx71wFoHRTP3LRg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/trigger": "^2.1.1", + "classnames": "2.x", + "rc-motion": "^2.0.1", + "rc-overflow": "^1.3.1", + "rc-util": "^5.16.1", + "rc-virtual-list": "^3.5.2" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/rc-slider": { + "version": "11.1.9", + "resolved": "https://registry.npmmirror.com/rc-slider/-/rc-slider-11.1.9.tgz", + "integrity": "sha512-h8IknhzSh3FEM9u8ivkskh+Ef4Yo4JRIY2nj7MrH6GQmrwV6mcpJf5/4KgH5JaVI1H3E52yCdpOlVyGZIeph5A==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.5", + "rc-util": "^5.36.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-steps": { + "version": "6.0.1", + "resolved": "https://registry.npmmirror.com/rc-steps/-/rc-steps-6.0.1.tgz", + "integrity": "sha512-lKHL+Sny0SeHkQKKDJlAjV5oZ8DwCdS2hFhAkIjuQt1/pB81M0cA0ErVFdHq9+jmPmFw1vJB2F5NBzFXLJxV+g==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.16.7", + "classnames": "^2.2.3", + "rc-util": "^5.16.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-switch": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/rc-switch/-/rc-switch-4.1.0.tgz", + "integrity": "sha512-TI8ufP2Az9oEbvyCeVE4+90PDSljGyuwix3fV58p7HV2o4wBnVToEyomJRVyTaZeqNPAp+vqeo4Wnj5u0ZZQBg==", + "license": "MIT", + 
"dependencies": { + "@babel/runtime": "^7.21.0", + "classnames": "^2.2.1", + "rc-util": "^5.30.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-table": { + "version": "7.54.0", + "resolved": "https://registry.npmmirror.com/rc-table/-/rc-table-7.54.0.tgz", + "integrity": "sha512-/wDTkki6wBTjwylwAGjpLKYklKo9YgjZwAU77+7ME5mBoS32Q4nAwoqhA2lSge6fobLW3Tap6uc5xfwaL2p0Sw==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "@rc-component/context": "^1.4.0", + "classnames": "^2.2.5", + "rc-resize-observer": "^1.1.0", + "rc-util": "^5.44.3", + "rc-virtual-list": "^3.14.2" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-tabs": { + "version": "15.7.0", + "resolved": "https://registry.npmmirror.com/rc-tabs/-/rc-tabs-15.7.0.tgz", + "integrity": "sha512-ZepiE+6fmozYdWf/9gVp7k56PKHB1YYoDsKeQA1CBlJ/POIhjkcYiv0AGP0w2Jhzftd3AVvZP/K+V+Lpi2ankA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.2", + "classnames": "2.x", + "rc-dropdown": "~4.2.0", + "rc-menu": "~9.16.0", + "rc-motion": "^2.6.2", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.34.1" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-textarea": { + "version": "1.10.2", + "resolved": "https://registry.npmmirror.com/rc-textarea/-/rc-textarea-1.10.2.tgz", + "integrity": "sha512-HfaeXiaSlpiSp0I/pvWpecFEHpVysZ9tpDLNkxQbMvMz6gsr7aVZ7FpWP9kt4t7DB+jJXesYS0us1uPZnlRnwQ==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "^2.2.1", + "rc-input": "~1.8.0", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.27.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-tooltip": { + "version": "6.4.0", + "resolved": 
"https://registry.npmmirror.com/rc-tooltip/-/rc-tooltip-6.4.0.tgz", + "integrity": "sha512-kqyivim5cp8I5RkHmpsp1Nn/Wk+1oeloMv9c7LXNgDxUpGm+RbXJGL+OPvDlcRnx9DBeOe4wyOIl4OKUERyH1g==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.2", + "@rc-component/trigger": "^2.0.0", + "classnames": "^2.3.1", + "rc-util": "^5.44.3" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-tree": { + "version": "5.13.1", + "resolved": "https://registry.npmmirror.com/rc-tree/-/rc-tree-5.13.1.tgz", + "integrity": "sha512-FNhIefhftobCdUJshO7M8uZTA9F4OPGVXqGfZkkD/5soDeOhwO06T/aKTrg0WD8gRg/pyfq+ql3aMymLHCTC4A==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.10.1", + "classnames": "2.x", + "rc-motion": "^2.0.1", + "rc-util": "^5.16.1", + "rc-virtual-list": "^3.5.1" + }, + "engines": { + "node": ">=10.x" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/rc-tree-select": { + "version": "5.27.0", + "resolved": "https://registry.npmmirror.com/rc-tree-select/-/rc-tree-select-5.27.0.tgz", + "integrity": "sha512-2qTBTzwIT7LRI1o7zLyrCzmo5tQanmyGbSaGTIf7sYimCklAToVVfpMC6OAldSKolcnjorBYPNSKQqJmN3TCww==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.25.7", + "classnames": "2.x", + "rc-select": "~14.16.2", + "rc-tree": "~5.13.0", + "rc-util": "^5.43.0" + }, + "peerDependencies": { + "react": "*", + "react-dom": "*" + } + }, + "node_modules/rc-upload": { + "version": "4.11.0", + "resolved": "https://registry.npmmirror.com/rc-upload/-/rc-upload-4.11.0.tgz", + "integrity": "sha512-ZUyT//2JAehfHzjWowqROcwYJKnZkIUGWaTE/VogVrepSl7AFNbQf4+zGfX4zl9Vrj/Jm8scLO0R6UlPDKK4wA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "classnames": "^2.2.5", + "rc-util": "^5.2.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-util": { + "version": "5.44.4", + "resolved": 
"https://registry.npmmirror.com/rc-util/-/rc-util-5.44.4.tgz", + "integrity": "sha512-resueRJzmHG9Q6rI/DfK6Kdv9/Lfls05vzMs1Sk3M2P+3cJa+MakaZyWY8IPfehVuhPJFKrIY1IK4GqbiaiY5w==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "react-is": "^18.2.0" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/rc-virtual-list": { + "version": "3.19.2", + "resolved": "https://registry.npmmirror.com/rc-virtual-list/-/rc-virtual-list-3.19.2.tgz", + "integrity": "sha512-Ys6NcjwGkuwkeaWBDqfI3xWuZ7rDiQXlH1o2zLfFzATfEgXcqpk8CkgMfbJD81McqjcJVez25a3kPxCR807evA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.20.0", + "classnames": "^2.2.6", + "rc-resize-observer": "^1.0.0", + "rc-util": "^5.36.0" + }, + "engines": { + "node": ">=8.x" + }, + "peerDependencies": { + "react": ">=16.9.0", + "react-dom": ">=16.9.0" + } + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmmirror.com/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmmirror.com/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmmirror.com/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "license": "MIT" + }, + "node_modules/react-refresh": { + "version": "0.17.0", + "resolved": 
"https://registry.npmmirror.com/react-refresh/-/react-refresh-0.17.0.tgz", + "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-router": { + "version": "6.30.3", + "resolved": "https://registry.npmmirror.com/react-router/-/react-router-6.30.3.tgz", + "integrity": "sha512-XRnlbKMTmktBkjCLE8/XcZFlnHvr2Ltdr1eJX4idL55/9BbORzyZEaIkBFDhFGCEWBBItsVrDxwx3gnisMitdw==", + "license": "MIT", + "dependencies": { + "@remix-run/router": "1.23.2" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": ">=16.8" + } + }, + "node_modules/react-router-dom": { + "version": "6.30.3", + "resolved": "https://registry.npmmirror.com/react-router-dom/-/react-router-dom-6.30.3.tgz", + "integrity": "sha512-pxPcv1AczD4vso7G4Z3TKcvlxK7g7TNt3/FNGMhfqyntocvYKj+GCatfigGDjbLozC4baguJ0ReCigoDJXb0ag==", + "license": "MIT", + "dependencies": { + "@remix-run/router": "1.23.2", + "react-router": "6.30.3" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "react": ">=16.8", + "react-dom": ">=16.8" + } + }, + "node_modules/resize-observer-polyfill": { + "version": "1.5.1", + "resolved": "https://registry.npmmirror.com/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz", + "integrity": "sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==", + "license": "MIT" + }, + "node_modules/rollup": { + "version": "4.57.1", + "resolved": "https://registry.npmmirror.com/rollup/-/rollup-4.57.1.tgz", + "integrity": "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + 
"@rollup/rollup-android-arm-eabi": "4.57.1", + "@rollup/rollup-android-arm64": "4.57.1", + "@rollup/rollup-darwin-arm64": "4.57.1", + "@rollup/rollup-darwin-x64": "4.57.1", + "@rollup/rollup-freebsd-arm64": "4.57.1", + "@rollup/rollup-freebsd-x64": "4.57.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.57.1", + "@rollup/rollup-linux-arm-musleabihf": "4.57.1", + "@rollup/rollup-linux-arm64-gnu": "4.57.1", + "@rollup/rollup-linux-arm64-musl": "4.57.1", + "@rollup/rollup-linux-loong64-gnu": "4.57.1", + "@rollup/rollup-linux-loong64-musl": "4.57.1", + "@rollup/rollup-linux-ppc64-gnu": "4.57.1", + "@rollup/rollup-linux-ppc64-musl": "4.57.1", + "@rollup/rollup-linux-riscv64-gnu": "4.57.1", + "@rollup/rollup-linux-riscv64-musl": "4.57.1", + "@rollup/rollup-linux-s390x-gnu": "4.57.1", + "@rollup/rollup-linux-x64-gnu": "4.57.1", + "@rollup/rollup-linux-x64-musl": "4.57.1", + "@rollup/rollup-openbsd-x64": "4.57.1", + "@rollup/rollup-openharmony-arm64": "4.57.1", + "@rollup/rollup-win32-arm64-msvc": "4.57.1", + "@rollup/rollup-win32-ia32-msvc": "4.57.1", + "@rollup/rollup-win32-x64-gnu": "4.57.1", + "@rollup/rollup-win32-x64-msvc": "4.57.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmmirror.com/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/scroll-into-view-if-needed": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/scroll-into-view-if-needed/-/scroll-into-view-if-needed-3.1.0.tgz", + "integrity": "sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ==", + "license": "MIT", + "dependencies": { + "compute-scroll-into-view": "^3.0.2" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": 
"https://registry.npmmirror.com/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/socket.io-client": { + "version": "4.8.3", + "resolved": "https://registry.npmmirror.com/socket.io-client/-/socket.io-client-4.8.3.tgz", + "integrity": "sha512-uP0bpjWrjQmUt5DTHq9RuoCBdFJF10cdX9X+a368j/Ft0wmaVgxlrjvK3kjvgCODOMMOz9lcaRzxmso0bTWZ/g==", + "license": "MIT", + "dependencies": { + "@socket.io/component-emitter": "~3.1.0", + "debug": "~4.4.1", + "engine.io-client": "~6.6.1", + "socket.io-parser": "~4.2.4" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/socket.io-parser": { + "version": "4.2.5", + "resolved": "https://registry.npmmirror.com/socket.io-parser/-/socket.io-parser-4.2.5.tgz", + "integrity": "sha512-bPMmpy/5WWKHea5Y/jYAP6k74A+hvmRCQaJuJB6I/ML5JZq/KfNieUVo/3Mh7SAqn7TyFdIo6wqYHInG1MU1bQ==", + "license": "MIT", + "dependencies": { + "@socket.io/component-emitter": "~3.1.0", + "debug": "~4.4.1" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/string-convert": { + "version": "0.2.1", + "resolved": "https://registry.npmmirror.com/string-convert/-/string-convert-0.2.1.tgz", + "integrity": "sha512-u/1tdPl4yQnPBjnVrmdLo9gtuLvELKsAoRapekWggdiQNvvvum+jYF329d84NAa660KQw7pB2n36KrIKVoXa3A==", + "license": "MIT" + }, + "node_modules/stylis": { + "version": "4.3.6", + "resolved": "https://registry.npmmirror.com/stylis/-/stylis-4.3.6.tgz", + "integrity": 
"sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==", + "license": "MIT" + }, + "node_modules/throttle-debounce": { + "version": "5.0.2", + "resolved": "https://registry.npmmirror.com/throttle-debounce/-/throttle-debounce-5.0.2.tgz", + "integrity": "sha512-B71/4oyj61iNH0KeCamLuE2rmKuTO5byTOSVwECM5FA7TiAiAW+UqTKZ9ERueC4qvgSttUhdmq1mXC3kJqGX7A==", + "license": "MIT", + "engines": { + "node": ">=12.22" + } + }, + "node_modules/toggle-selection": { + "version": "1.0.6", + "resolved": "https://registry.npmmirror.com/toggle-selection/-/toggle-selection-1.0.6.tgz", + "integrity": "sha512-BiZS+C1OS8g/q2RRbJmy59xpyghNBqrr6k5L/uKBGRsTfxmu3ffiRnd8mlGPUVayg8pvfi5urfnu8TU7DVOkLQ==", + "license": "MIT" + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmmirror.com/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmmirror.com/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/use-sync-external-store": { + "version": "1.6.0", + 
"resolved": "https://registry.npmmirror.com/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz", + "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/vite": { + "version": "5.4.21", + "resolved": "https://registry.npmmirror.com/vite/-/vite-5.4.21.tgz", + "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.21.3", + "postcss": "^8.4.43", + "rollup": "^4.20.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/ws": { + "version": "8.18.3", + "resolved": "https://registry.npmmirror.com/ws/-/ws-8.18.3.tgz", + "integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + 
"node_modules/xmlhttprequest-ssl": { + "version": "2.1.2", + "resolved": "https://registry.npmmirror.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-2.1.2.tgz", + "integrity": "sha512-TEU+nJVUUnA4CYJFLvK5X9AOeH4KvDvhIfm0vV1GaQRtchnG0hgK5p8hw/xjv8cunWYCsiPCSDzObPyhEwq3KQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/zustand": { + "version": "4.5.7", + "resolved": "https://registry.npmmirror.com/zustand/-/zustand-4.5.7.tgz", + "integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==", + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.2.2" + }, + "engines": { + "node": ">=12.7.0" + }, + "peerDependencies": { + "@types/react": ">=16.8", + "immer": ">=9.0.6", + "react": ">=16.8" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + } + } + } + } +} diff --git a/frontend/package.json b/frontend/package.json new file mode 100644 index 0000000..8921b11 --- /dev/null +++ b/frontend/package.json @@ -0,0 +1,28 @@ +{ + "name": "ai-chatroom-frontend", + "version": "1.0.0", + "private": true, + "dependencies": { + "@ant-design/icons": "^5.2.6", + "antd": "^5.13.0", + "axios": "^1.6.5", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-router-dom": "^6.21.1", + "zustand": "^4.4.7", + "socket.io-client": "^4.7.4", + "dayjs": "^1.11.10" + }, + "devDependencies": { + "@types/react": "^18.2.47", + "@types/react-dom": "^18.2.18", + "@vitejs/plugin-react": "^4.2.1", + "typescript": "^5.3.3", + "vite": "^5.0.11" + }, + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "preview": "vite preview" + } +} diff --git 
a/frontend/src/App.tsx b/frontend/src/App.tsx new file mode 100644 index 0000000..c5b33dd --- /dev/null +++ b/frontend/src/App.tsx @@ -0,0 +1,39 @@ +/** + * 应用主组件 + * 定义路由和布局 + */ +import React from 'react' +import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom' +import { Layout } from 'antd' +import Sidebar from './components/Sidebar' +import Dashboard from './pages/Dashboard' +import ProviderConfig from './pages/ProviderConfig' +import AgentManagement from './pages/AgentManagement' +import ChatRoom from './pages/ChatRoom' +import DiscussionHistory from './pages/DiscussionHistory' + +const { Content } = Layout + +const App: React.FC = () => { + return ( + + + + + + + } /> + } /> + } /> + } /> + } /> + } /> + + + + + + ) +} + +export default App diff --git a/frontend/src/components/AgentCard.tsx b/frontend/src/components/AgentCard.tsx new file mode 100644 index 0000000..cb9959b --- /dev/null +++ b/frontend/src/components/AgentCard.tsx @@ -0,0 +1,122 @@ +/** + * Agent卡片组件 + */ +import React from 'react' +import { Card, Avatar, Tag, Switch, Typography, Space, Button, Tooltip } from 'antd' +import { + RobotOutlined, + EditOutlined, + DeleteOutlined, + CopyOutlined, + PlayCircleOutlined +} from '@ant-design/icons' +import type { Agent } from '../types' + +const { Text, Paragraph } = Typography + +interface AgentCardProps { + agent: Agent + onEdit?: (agent: Agent) => void + onDelete?: (agent: Agent) => void + onDuplicate?: (agent: Agent) => void + onTest?: (agent: Agent) => void + onToggleEnabled?: (agent: Agent, enabled: boolean) => void +} + +const AgentCard: React.FC = ({ + agent, + onEdit, + onDelete, + onDuplicate, + onTest, + onToggleEnabled +}) => { + return ( + + + + + } + rules={[{ required: true, message: '请输入系统提示词' }]} + > +