"""
|
|
AI接口提供商数据模型
|
|
定义AI服务配置结构
|
|
"""
|
|
from datetime import datetime
|
|
from typing import Optional, Dict, Any, List
|
|
from enum import Enum
|
|
from pydantic import Field
|
|
from beanie import Document
|
|
|
|
|
|
class ProviderType(str, Enum):
    """Supported AI provider types."""
    MINIMAX = "minimax"
    ZHIPU = "zhipu"
    OPENROUTER = "openrouter"
    KIMI = "kimi"
    DEEPSEEK = "deepseek"
    GEMINI = "gemini"
    OLLAMA = "ollama"
    LLMSTUDIO = "llmstudio"


class ProxyConfig:
    """Proxy configuration."""
    http_proxy: Optional[str] = None  # HTTP proxy address
    https_proxy: Optional[str] = None  # HTTPS proxy address
    no_proxy: List[str] = []  # Domains that should bypass the proxy

    def __init__(
        self,
        http_proxy: Optional[str] = None,
        https_proxy: Optional[str] = None,
        no_proxy: Optional[List[str]] = None
    ):
        self.http_proxy = http_proxy
        self.https_proxy = https_proxy
        self.no_proxy = no_proxy or []

    def to_dict(self) -> Dict[str, Any]:
        """Convert to a plain dictionary."""
        return {
            "http_proxy": self.http_proxy,
            "https_proxy": self.https_proxy,
            "no_proxy": self.no_proxy
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ProxyConfig":
        """Create an instance from a dictionary."""
        if not data:
            return cls()
        return cls(
            http_proxy=data.get("http_proxy"),
            https_proxy=data.get("https_proxy"),
            no_proxy=data.get("no_proxy", [])
        )


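# Usage sketch (illustrative): ProxyConfig round-trips through plain dicts,
# which is how AIProvider stores it in its "proxy_config" field. The proxy
# address and domain below are example values, not defaults.
#
#     cfg = ProxyConfig(http_proxy="http://127.0.0.1:7890", no_proxy=["localhost"])
#     assert ProxyConfig.from_dict(cfg.to_dict()).to_dict() == cfg.to_dict()

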
class RateLimit:
    """Rate limit configuration."""
    requests_per_minute: int = 60  # Requests allowed per minute
    tokens_per_minute: int = 100000  # Tokens allowed per minute

    def __init__(
        self,
        requests_per_minute: int = 60,
        tokens_per_minute: int = 100000
    ):
        self.requests_per_minute = requests_per_minute
        self.tokens_per_minute = tokens_per_minute

    def to_dict(self) -> Dict[str, int]:
        """Convert to a plain dictionary."""
        return {
            "requests_per_minute": self.requests_per_minute,
            "tokens_per_minute": self.tokens_per_minute
        }

    @classmethod
    def from_dict(cls, data: Dict[str, int]) -> "RateLimit":
        """Create an instance from a dictionary."""
        if not data:
            return cls()
        return cls(
            requests_per_minute=data.get("requests_per_minute", 60),
            tokens_per_minute=data.get("tokens_per_minute", 100000)
        )


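# Usage sketch (illustrative): keys missing from the stored dict fall back to
# the defaults of 60 requests/minute and 100000 tokens/minute.
#
#     limit = RateLimit.from_dict({"requests_per_minute": 30})
#     assert limit.tokens_per_minute == 100000

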
class AIProvider(Document):
    """
    AI provider document model.

    Stores the configuration for each AI service.
    """
    provider_id: str = Field(..., description="Unique identifier")
    provider_type: str = Field(..., description="Provider type: minimax, zhipu, etc.")
    name: str = Field(..., description="Custom display name")
    api_key: str = Field(default="", description="API key (stored encrypted)")
    base_url: str = Field(default="", description="API base URL")
    model: str = Field(..., description="Model name to use")

    # Proxy settings
    use_proxy: bool = Field(default=False, description="Whether to route requests through a proxy")
    proxy_config: Dict[str, Any] = Field(default_factory=dict, description="Proxy configuration")

    # Rate limiting
    rate_limit: Dict[str, int] = Field(
        default_factory=lambda: {"requests_per_minute": 60, "tokens_per_minute": 100000},
        description="Rate limit configuration"
    )

    # Other settings
    timeout: int = Field(default=60, description="Request timeout in seconds")
    extra_params: Dict[str, Any] = Field(default_factory=dict, description="Extra parameters")

    # Metadata
    enabled: bool = Field(default=True, description="Whether this provider is enabled")
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)

    class Settings:
        name = "ai_providers"

    def get_proxy_config(self) -> ProxyConfig:
        """Return the proxy settings as a ProxyConfig object."""
        return ProxyConfig.from_dict(self.proxy_config)

    def get_rate_limit(self) -> RateLimit:
        """Return the rate limit settings as a RateLimit object."""
        return RateLimit.from_dict(self.rate_limit)

    class Config:
        json_schema_extra = {
            "example": {
                "provider_id": "openrouter-gpt4",
                "provider_type": "openrouter",
                "name": "OpenRouter GPT-4",
                "api_key": "sk-xxx",
                "base_url": "https://openrouter.ai/api/v1",
                "model": "openai/gpt-4-turbo",
                "use_proxy": True,
                "proxy_config": {
                    "http_proxy": "http://127.0.0.1:7890",
                    "https_proxy": "http://127.0.0.1:7890"
                },
                "timeout": 60
            }
        }
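

# Usage sketch: a minimal demo of how this model might be registered with
# Beanie and round-tripped against MongoDB. The connection URL, the "chatroom"
# database name, and the example provider values are illustrative assumptions,
# not settings defined by this module.
if __name__ == "__main__":
    import asyncio

    from beanie import init_beanie
    from motor.motor_asyncio import AsyncIOMotorClient

    async def _demo() -> None:
        # Register the document model against an assumed local MongoDB instance
        client = AsyncIOMotorClient("mongodb://localhost:27017")
        await init_beanie(database=client["chatroom"], document_models=[AIProvider])

        # Create and persist an example provider (values mirror json_schema_extra)
        provider = AIProvider(
            provider_id="openrouter-gpt4",
            provider_type=ProviderType.OPENROUTER.value,
            name="OpenRouter GPT-4",
            api_key="sk-xxx",
            base_url="https://openrouter.ai/api/v1",
            model="openai/gpt-4-turbo",
        )
        await provider.insert()

        # Query it back and read the rate limit through the typed helper
        found = await AIProvider.find_one(AIProvider.provider_id == "openrouter-gpt4")
        if found is not None:
            print(found.get_rate_limit().to_dict())

    asyncio.run(_demo())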