57 lines
1.9 KiB
Python
57 lines
1.9 KiB
Python
|
|
"""OpenAI (GPT) 适配器"""
|
||
|
|
|
||
|
|
from __future__ import annotations
|
||
|
|
|
||
|
|
import json
|
||
|
|
from typing import Any, AsyncGenerator
|
||
|
|
|
||
|
|
import httpx
|
||
|
|
|
||
|
|
from app.providers.base import BaseProvider, Capability, QuotaInfo
|
||
|
|
|
||
|
|
|
||
|
|
class OpenAIProvider(BaseProvider):
    """Adapter for the OpenAI (GPT) API.

    Relies on BaseProvider for endpoint resolution (``_base_url``) and
    authentication headers (``_build_headers``), both driven by the
    ``plan`` dict — its schema is defined by the base class, not here.
    """

    name = "openai"
    display_name = "OpenAI"
    capabilities = [Capability.CHAT, Capability.IMAGE, Capability.EMBEDDING]

    # Shared request timeout in seconds for all upstream HTTP calls
    # (was hard-coded separately in chat() and generate_image()).
    _REQUEST_TIMEOUT = 120

    async def chat(
        self,
        messages: list[dict],
        model: str,
        plan: dict,
        stream: bool = True,
        **kwargs,
    ) -> AsyncGenerator[str, None]:
        """Proxy a chat-completion request to the OpenAI API.

        Args:
            messages: Chat history in OpenAI message format.
            model: Upstream model identifier.
            plan: Provider plan/config consumed by ``_base_url`` and
                ``_build_headers``.
            stream: When True, re-emit upstream SSE ``data:`` lines,
                re-framed with a trailing blank line; otherwise yield
                the complete JSON response once, serialized to a string.
            **kwargs: Extra body fields merged last into the request
                body, so callers may deliberately override ``model``,
                ``messages`` or ``stream``.

        Yields:
            SSE-framed ``data: ...`` chunks (streaming) or a single
            JSON string (non-streaming).

        Raises:
            httpx.HTTPStatusError: On a non-2xx upstream response.
        """
        url = f"{self._base_url(plan)}/chat/completions"
        body: dict[str, Any] = {
            "model": model,
            "messages": messages,
            "stream": stream,
            **kwargs,
        }
        headers = self._build_headers(plan)

        async with httpx.AsyncClient(timeout=self._REQUEST_TIMEOUT) as client:
            if stream:
                async with client.stream("POST", url, json=body, headers=headers) as resp:
                    resp.raise_for_status()
                    async for line in resp.aiter_lines():
                        # aiter_lines() strips line endings, so restore
                        # SSE event framing ("\n\n"). Blank lines and
                        # non-"data:" fields (e.g. "event:", comments)
                        # are intentionally dropped, as before.
                        if line.startswith("data: "):
                            yield line + "\n\n"
            else:
                resp = await client.post(url, json=body, headers=headers)
                resp.raise_for_status()
                yield json.dumps(resp.json())

    async def generate_image(self, prompt: str, plan: dict, **kwargs) -> dict[str, Any]:
        """Create image(s) via the OpenAI images endpoint.

        Args:
            prompt: Text prompt for image generation.
            plan: Provider plan/config consumed by ``_base_url`` and
                ``_build_headers``.
            **kwargs: Extra body fields; merged last so callers may
                override ``n`` (default 1) or ``model``.

        Returns:
            The decoded JSON response from the API.

        Raises:
            httpx.HTTPStatusError: On a non-2xx upstream response.
        """
        url = f"{self._base_url(plan)}/images/generations"
        # "dall-e-3" is only a fallback default; **kwargs spreads last
        # on purpose so an explicit caller-supplied model/n wins.
        body = {"model": kwargs.get("model", "dall-e-3"), "prompt": prompt, "n": 1, **kwargs}
        headers = self._build_headers(plan)
        async with httpx.AsyncClient(timeout=self._REQUEST_TIMEOUT) as client:
            resp = await client.post(url, json=body, headers=headers)
            resp.raise_for_status()
            return resp.json()