Compare commits

...

50 Commits

Author SHA1 Message Date
arch3rPro
9f19223ab7 feat: update nocodb 2026.04.0 2026-04-14 05:33:06 +08:00
arch3rPro
e247555ec3 feat: update new-api 0.12.9 2026-04-14 05:32:43 +08:00
arch3rPro
ddb4652d65 feat: update new-api 0.12.9-allinone 2026-04-14 05:32:20 +08:00
arch3rPro
710a218489 feat: update n8n-zh 2.17.0 2026-04-14 05:32:08 +08:00
arch3rPro
f18bb659fd feat: update inspector 0.21.2 2026-04-14 05:31:34 +08:00
arch3rPro
300b4fd8da feat: update new-api 0.12.8 2026-04-13 05:32:25 +08:00
arch3rPro
b0b24b0adc feat: update new-api 0.12.8-allinone 2026-04-13 05:32:01 +08:00
arch3rPro
b3959314d1 feat: update blinko 1.8.7 2026-04-13 05:30:25 +08:00
arch3rPro
ec095eba5d fix(axonhub): 修正docker-compose中镜像标签格式
将镜像标签从0.9.31改为v0.9.31以保持版本标签一致性
2026-04-12 17:09:47 +08:00
arch3rPro
e44146ebf6 feat(axonhub): 添加axonhub应用配置及文档
添加axonhub应用的docker-compose配置、数据文件、logo和README文档
2026-04-12 17:07:04 +08:00
arch3rPro
a1a8e77b3a feat: update prompt-optimizer 2.9.3 2026-04-11 05:32:11 +08:00
arch3rPro
253b0d2005 feat(craft-agents): 升级应用版本至0.8.5并更新配置文件 2026-04-10 10:24:23 +08:00
arch3rPro
af89dd30a7 fix(craft-agents): 添加bun运行命令以允许不安全绑定
添加command配置以使用bun运行服务器并允许不安全绑定,确保服务正常启动
2026-04-10 10:21:07 +08:00
arch3rPro
6ad7b05160 feat(craft-agents): 添加craft-agents应用配置和文档
添加craft-agents应用的docker-compose配置、数据配置文件和README文档
2026-04-10 09:52:05 +08:00
arch3rPro
354fb12eba feat: update new-api 0.12.6 2026-04-10 05:31:34 +08:00
arch3rPro
a3be1dac39 feat: update new-api 0.12.6-allinone 2026-04-10 05:31:15 +08:00
arch3rPro
8e5b454640 feat: update new-api 0.12.5 2026-04-09 05:31:46 +08:00
arch3rPro
8c0e79a720 feat: update new-api 0.12.5-allinone 2026-04-09 05:31:24 +08:00
arch3rPro
d31e124a8e feat: update prompt-optimizer 2.9.2 2026-04-08 05:32:55 +08:00
arch3rPro
4df70ffc78 feat: update nzbget 26.1 2026-04-08 05:32:38 +08:00
arch3rPro
464e3656de feat: update new-api 0.12.3 2026-04-08 05:32:07 +08:00
arch3rPro
f5b5428851 feat: update new-api 0.12.3-allinone 2026-04-08 05:31:49 +08:00
arch3rPro
21cd2fe790 feat: update n8n-zh 2.16.0 2026-04-08 05:31:39 +08:00
arch3rPro
bee54bcdd4 feat: update gpt4free 7.4.7-slim 2026-04-08 05:31:08 +08:00
arch3rPro
3ef319c34b feat: update gpt4free 7.4.7 2026-04-08 05:30:58 +08:00
arch3rPro
2b21339f76 feat: update easytier 2.6.0 2026-04-08 05:30:37 +08:00
arch3rPro
1e8e28b50e feat: update prompt-optimizer 2.9.1 2026-04-07 05:33:01 +08:00
arch3rPro
50cfabba9a feat: update nezha 2.0.7 2026-04-07 05:32:25 +08:00
arch3rPro
a31df60b8c feat: update new-api 0.12.2 2026-04-07 05:31:51 +08:00
arch3rPro
269398241d feat: update new-api 0.12.2-allinone 2026-04-07 05:31:32 +08:00
arch3rPro
03633cdb8e feat: update gpt4free 7.4.3-slim 2026-04-07 05:30:55 +08:00
arch3rPro
0cd611e796 feat: update gpt4free 7.4.3 2026-04-07 05:30:45 +08:00
arch3rPro
b9eddadda4 feat: update qexo 4.1.1 2026-04-06 05:32:14 +08:00
arch3rPro
6fe13bd7bc feat: update docmost 0.71.1 2026-04-06 05:30:42 +08:00
arch3rPro
58593348cf feat: update beszel-agent 0.18.7 2026-04-06 05:30:17 +08:00
arch3rPro
a270dafeeb feat: update gpt4free 7.3.9-slim 2026-04-05 05:30:54 +08:00
arch3rPro
e8f5b20093 feat: update gpt4free 7.3.9 2026-04-05 05:30:44 +08:00
arch3rPro
cc3f716151 feat: update gpt4free 7.3.7-slim 2026-04-04 05:30:56 +08:00
arch3rPro
3ab8d1b61d feat: update gpt4free 7.3.7 2026-04-04 05:30:47 +08:00
arch3rPro
74b14d5feb feat(litellm): 添加v1.83.0-nightly版本并更新配置
更新latest版本的docker-compose.yml和data.yml配置,添加v1.83.0-nightly版本的配置文件
将LITELLM_MASTER_KEY字段类型改为password并更新默认值
2026-04-03 15:07:11 +08:00
arch3rPro
20ea51d3ec feat: 添加 LiteLLM 应用配置文件和部署文件
添加 LiteLLM 的配置文件、部署文件和文档,包括:
- Prometheus 监控配置
- Docker Compose 部署文件
- 应用元数据配置
- README 文档
2026-04-03 15:06:34 +08:00
arch3rPro
21b4089535 feat: update prompt-optimizer 2.8.0 2026-04-03 05:32:04 +08:00
arch3rPro
859940d036 feat: update new-api 0.12.1 2026-04-03 05:31:29 +08:00
arch3rPro
f3eae2feac feat: update new-api 0.12.1-allinone 2026-04-03 05:31:12 +08:00
arch3rPro
47fa6c4bca feat: update langflow 1.8.4 2026-04-03 05:30:55 +08:00
arch3rPro
2790ace79f docs: 在README中添加Sub2API、CLIProxyAPI Plus和Trae-Proxy项目介绍 2026-04-03 01:37:28 +08:00
arch3rPro
468cceabd9 refactor(sub2api): 重构配置管理和版本结构
移除硬编码的配置文件,改为使用环境变量
添加 0.1.106 稳定版本目录结构
更新 README 文档说明自动生成密码功能
2026-04-03 01:28:09 +08:00
arch3rPro
4259135298 feat(cliproxyapi-plus): 升级至v6.9.9-0并更新文档
- 添加6.9.9-0版本的docker-compose.yml和data.yml
- 删除6.9.5-0版本的旧配置文件
- 更新README文档,增加详细配置说明
- 修改端口变量名使其更具描述性
- 更新文档链接至最新帮助中心
2026-04-02 22:50:35 +08:00
arch3rPro
dc57fd7270 feat: update new-api 0.12.0 2026-04-02 05:31:39 +08:00
arch3rPro
a8d61e4d2c feat: update new-api 0.12.0-allinone 2026-04-02 05:31:18 +08:00
112 changed files with 1743 additions and 234 deletions

View File

@@ -246,7 +246,7 @@ docker run -d --name=nginx -p 80:80 nginx:latest
🚀 免费的GPT-4和其他大语言模型API接口
<kbd>7.3.5-slim</kbd> • [官网链接](https://github.com/xtekky/gpt4free)
<kbd>7.4.7-slim</kbd> • [官网链接](https://github.com/xtekky/gpt4free)
</td>
<td width="33%" align="center">
@@ -289,7 +289,7 @@ docker run -d --name=nginx -p 80:80 nginx:latest
轻量级文档管理系统,支持多人协作编辑与版本控制
<kbd>0.71.0</kbd> • [官网链接](https://github.com/docmost/docmost)
<kbd>0.71.1</kbd> • [官网链接](https://github.com/docmost/docmost)
</td>
<td width="33%" align="center">
@@ -313,7 +313,7 @@ docker run -d --name=nginx -p 80:80 nginx:latest
美观强大的在线静态博客管理器,支持多种平台
<kbd>4.0.1</kbd> • [官网链接](https://github.com/Qexo/Qexo)
<kbd>4.1.1</kbd> • [官网链接](https://github.com/Qexo/Qexo)
</td>
</tr>
@@ -383,7 +383,7 @@ docker run -d --name=nginx -p 80:80 nginx:latest
开源自托管个人笔记工具支持AI增强笔记检索
<kbd>1.8.6</kbd> • [官网链接](https://github.com/blinko-space/blinko)
<kbd>1.8.7</kbd> • [官网链接](https://github.com/blinko-space/blinko)
</td>
<td width="33%" align="center">
@@ -424,7 +424,7 @@ AI驱动的开源代码知识库与文档协作平台支持多模型、多数
开源Airtable替代品将任何数据库转换为智能电子表格
<kbd>0.301.5</kbd> • [官网链接](https://github.com/nocodb/nocodb)
<kbd>2026.04.0</kbd> • [官网链接](https://github.com/nocodb/nocodb)
</td>
<td width="33%" align="center">
@@ -455,7 +455,7 @@ AI驱动的开源代码知识库与文档协作平台支持多模型、多数
🌐 简单安全去中心化的内网穿透 VPN 组网方案
<kbd>2.5.0</kbd> • [官网链接](https://github.com/EasyTier/Easytier)
<kbd>2.6.0</kbd> • [官网链接](https://github.com/EasyTier/Easytier)
</td>
<td width="33%" align="center">
@@ -565,7 +565,7 @@ AI驱动的开源代码知识库与文档协作平台支持多模型、多数
🚀 强大的AI提示词优化工具支持多种主流大语言模型
<kbd>2.7.0</kbd> • [官网链接](https://github.com/arch3rPro/Prompt-Optimizer)
<kbd>2.9.3</kbd> • [官网链接](https://github.com/arch3rPro/Prompt-Optimizer)
</td>
</tr>
@@ -582,7 +582,7 @@ AI驱动的开源代码知识库与文档协作平台支持多模型、多数
🍥 新一代大模型网关与AI资产管理系统支持多种模型统一调用
<kbd>0.11.9</kbd> • [官网链接](https://docs.newapi.pro/)
<kbd>0.12.9</kbd> • [官网链接](https://docs.newapi.pro/)
</td>
<td width="33%" align="center">
@@ -635,7 +635,7 @@ AI驱动的开源代码知识库与文档协作平台支持多模型、多数
🔍 模型上下文协议调试工具支持MCP服务器测试与开发
<kbd>0.21.1</kbd> • [官网链接](https://github.com/modelcontextprotocol/inspector)
<kbd>0.21.2</kbd> • [官网链接](https://github.com/modelcontextprotocol/inspector)
</td>
<td width="33%" align="center">
@@ -705,7 +705,7 @@ AI驱动的开源代码知识库与文档协作平台支持多模型、多数
🔮 强大的AI应用构建平台可视化设计AI驱动的工作流和代理
<kbd>1.8.3</kbd> • [官网链接](https://langflow.org/)
<kbd>1.8.4</kbd> • [官网链接](https://langflow.org/)
</td>
<td width="33%" align="center">
@@ -729,7 +729,7 @@ AI驱动的开源代码知识库与文档协作平台支持多模型、多数
🔄 n8n汉化版具有原生AI能力的Fair-code工作流自动化平台
<kbd>2.15.0</kbd> • [官网链接](https://n8n.io/)
<kbd>2.17.0</kbd> • [官网链接](https://n8n.io/)
</td>
</tr>
@@ -764,6 +764,47 @@ AI驱动的开源代码知识库与文档协作平台支持多模型、多数
</tr>
</table>
<table>
<tr>
<td width="33%" align="center">
<a href="./apps/sub2api/README.md">
<img src="./apps/sub2api/logo.png" width="60" height="60" alt="Sub2API">
<br><b>Sub2API</b>
</a>
🍥 AI API 网关平台支持订阅配额分发、API Key 管理、计费和负载均衡
<kbd>0.1.106</kbd> • [官网链接](https://sub2api.org)
</td>
<td width="33%" align="center">
<a href="./apps/cliproxyapi-plus/README.md">
<img src="./apps/cliproxyapi-plus/logo.png" width="60" height="60" alt="CLIProxyAPI Plus">
<br><b>CLIProxyAPI Plus</b>
</a>
🔗 CLIProxyAPI Plus 代理API服务
<kbd>6.9.9-0</kbd> • [官网链接](https://github.com/router-for-me/CLIProxyAPIPlus)
</td>
<td width="33%" align="center">
<a href="./apps/trae-proxy/README.md">
<img src="./apps/trae-proxy/logo.png" width="60" height="60" alt="Trae-Proxy">
<br><b>Trae-Proxy</b>
</a>
🎯 一个智能的API代理工具专门用于拦截和重定向OpenAI API请求到自定义后端服务
<kbd>1.0.0</kbd> • [官网链接](https://github.com/arch3rPro/Trae-Proxy)
</td>
</tr>
</table>
#### 🎵 多媒体管理
<table>
@@ -818,7 +859,7 @@ AI驱动的开源代码知识库与文档协作平台支持多模型、多数
📥 高性能Usenet下载工具支持Web界面管理
<kbd>26.0</kbd> • [官网链接](https://nzbget.net/)
<kbd>26.1</kbd> • [官网链接](https://nzbget.net/)
</td>
<td width="33%" align="center">
@@ -861,7 +902,7 @@ AI驱动的开源代码知识库与文档协作平台支持多模型、多数
📊 开源轻量易用的服务器监控运维工具
<kbd>2.0.6</kbd> • [官网链接](https://github.com/naiba/nezha/)
<kbd>2.0.7</kbd> • [官网链接](https://github.com/naiba/nezha/)
</td>
<td width="33%" align="center">
@@ -955,7 +996,7 @@ AI驱动的开源代码知识库与文档协作平台支持多模型、多数
⚡ 轻量级服务器监控代理,支持实时性能数据收集
<kbd>0.18.6</kbd> • [官网链接](https://github.com/henrygd/beszel)
<kbd>0.18.7</kbd> • [官网链接](https://github.com/henrygd/beszel)
</td>
<td width="33%" align="center">

View File

@@ -0,0 +1,28 @@
additionalProperties:
formFields:
- default: 8090
edit: true
envKey: PANEL_APP_PORT_HTTP
labelEn: Web Port
labelZh: Web端口
required: true
rule: paramPort
type: number
label:
en: Web Port
zh: Web端口
ja: Webポート
ko: Web 포트
- default: ""
edit: true
envKey: AXONHUB_DB_PASSWORD
labelEn: Database Password
labelZh: 数据库密码
required: false
rule: paramComplexity
type: password
label:
en: Database Password
zh: 数据库密码
ja: データベースパスワード
ko: 데이터베이스 비밀번호

View File

@@ -0,0 +1,20 @@
services:
axonhub:
image: looplj/axonhub:v0.9.31
container_name: ${CONTAINER_NAME}
restart: always
networks:
- 1panel-network
ports:
- "${PANEL_APP_PORT_HTTP}:8090"
volumes:
- ./data:/data
environment:
- TZ=Asia/Shanghai
- AXONHUB_DB_DIALECT=sqlite3
- AXONHUB_DB_DSN=file:/data/axonhub.db?cache=shared&_fk=1&pragma=journal_mode(WAL)
labels:
createdBy: "Apps"
networks:
1panel-network:
external: true

79
apps/axonhub/README.md Normal file
View File

@@ -0,0 +1,79 @@
# AxonHub
一站式AI开发平台 - 统一API网关支持多种LLM提供商。
## 功能特点
- 🔄 **任意SDK调用任意模型** - 使用OpenAI SDK调用Claude或使用Anthropic SDK调用GPT零代码修改
- 🔍 **完整请求追踪** - 线程感知的可观测性,完整的请求时间线,快速调试
- 🔐 **企业级RBAC** - 细粒度访问控制、使用配额和数据隔离
- **智能负载均衡** - <100ms自动故障转移始终路由到最健康的通道
- 💰 **实时成本追踪** - 每请求成本分解,输入、输出、缓存令牌全追踪
## 支持的LLM提供商
- OpenAI (GPT-4, GPT-4o, GPT-5等)
- Anthropic (Claude 3.5, Claude 3.0等)
- Zhipu AI (GLM-4.5, GLM-4.5-air等)
- Moonshot AI/Kimi (kimi-k2等)
- DeepSeek (DeepSeek-V3.1等)
- ByteDance Doubao (doubao-1.6等)
- Gemini (Gemini 2.5等)
- Fireworks (MiniMax-M2.5, GLM-5, Kimi K2.5等)
- Jina AI (Embeddings, Reranker等)
- OpenRouter (多种模型)
- ZAI (图像生成)
- AWS Bedrock (Claude on AWS)
- Google Cloud (Claude on GCP)
- NanoGPT (多种模型, 图像生成)
## 使用说明
### 首次访问
1. 访问 `http://<服务器IP>:8090`
2. 按照设置向导创建管理员账户密码至少6位
3. 登录后配置AI提供商的API密钥
4. 创建API密钥开始使用
### 默认端口
- Web界面: 8090
### 数据库配置
默认使用SQLite数据库数据存储在 `./data` 目录。
如需使用PostgreSQL或MySQL请参考官方文档配置环境变量
- `AXONHUB_DB_DIALECT`: 数据库类型 (postgres/mysql/sqlite3)
- `AXONHUB_DB_DSN`: 数据库连接字符串
### 数据目录
应用数据存储在 `./data` 目录。
## 快速开始
### 使用OpenAI SDK调用Claude
```python
from openai import OpenAI
client = OpenAI(
base_url="http://localhost:8090/v1", # 指向AxonHub
api_key="your-axonhub-api-key" # 使用AxonHub API密钥
)
# 使用OpenAI SDK调用Claude
response = client.chat.completions.create(
model="claude-3-5-sonnet", # 或 gpt-4, gemini-pro, deepseek-chat...
messages=[{"role": "user", "content": "Hello!"}]
)
```
## 相关链接
- 官方网站: https://github.com/looplj/axonhub
- GitHub: https://github.com/looplj/axonhub
- 文档: https://github.com/looplj/axonhub#readme
- Demo: https://axonhub.onrender.com (Email: demo@example.com, Password: 12345678)

29
apps/axonhub/data.yml Normal file
View File

@@ -0,0 +1,29 @@
name: AxonHub
tags:
- 开发工具
- AI工具
title: 一站式AI开发平台 - 统一API网关支持多种LLM提供商
description: 一站式AI开发平台 - 统一API网关支持多种LLM提供商
additionalProperties:
key: axonhub
name: AxonHub
tags:
- DevTool
- AI
shortDescZh: 一站式AI开发平台 - 统一API网关
shortDescEn: All-in-one AI Development Platform - Unified API Gateway
description:
en: AxonHub is an AI gateway that lets you switch between model providers without changing a single line of code. Use any SDK to call 100+ LLMs. Built-in failover, load balancing, cost control & end-to-end tracing.
zh: AxonHub是一个AI网关让您无需修改任何代码即可在模型提供商之间切换。使用任何SDK调用100+个LLM。内置故障转移、负载均衡、成本控制和端到端追踪。
ja: AxonHubは、コードを1行も変更せずにモデルプロバイダー間を切り替えることができるAIゲートウェイです。任意のSDKを使用して100以上のLLMを呼び出します。フェイルオーバー、負荷分散、コスト制御、エンドツーエンドのトレースを内蔵。
ko: AxonHub는 코드를 한 줄도 변경하지 않고 모델 제공자 간에 전환할 수 있는 AI 게이트웨이입니다. 모든 SDK를 사용하여 100개 이상의 LLM을 호출합니다. 장애 조치, 로드 밸런싱, 비용 제어 및 엔드 투 엔드 추적이 내장되어 있습니다.
type: website
crossVersionUpdate: true
limit: 0
recommend: 0
website: https://github.com/looplj/axonhub
github: https://github.com/looplj/axonhub
document: https://github.com/looplj/axonhub#readme
architectures:
- amd64
- arm64

View File

@@ -0,0 +1,28 @@
additionalProperties:
formFields:
- default: 8090
edit: true
envKey: PANEL_APP_PORT_HTTP
labelEn: Web Port
labelZh: Web端口
required: true
rule: paramPort
type: number
label:
en: Web Port
zh: Web端口
ja: Webポート
ko: Web 포트
- default: ""
edit: true
envKey: AXONHUB_DB_PASSWORD
labelEn: Database Password
labelZh: 数据库密码
required: false
rule: paramComplexity
type: password
label:
en: Database Password
zh: 数据库密码
ja: データベースパスワード
ko: 데이터베이스 비밀번호

View File

@@ -0,0 +1,20 @@
services:
axonhub:
image: looplj/axonhub:latest
container_name: ${CONTAINER_NAME}
restart: always
networks:
- 1panel-network
ports:
- "${PANEL_APP_PORT_HTTP}:8090"
volumes:
- ./data:/data
environment:
- TZ=Asia/Shanghai
- AXONHUB_DB_DIALECT=sqlite3
- AXONHUB_DB_DSN=file:/data/axonhub.db?cache=shared&_fk=1&pragma=journal_mode(WAL)
labels:
createdBy: "Apps"
networks:
1panel-network:
external: true

BIN
apps/axonhub/logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 67 KiB

View File

@@ -1,6 +1,6 @@
services:
beszel-agent:
image: henrygd/beszel-agent:0.18.6
image: henrygd/beszel-agent:0.18.7
container_name: ${CONTAINER_NAME}
restart: always
network_mode: host

View File

@@ -1,6 +1,6 @@
services:
blinko:
image: blinkospace/blinko:1.8.6
image: blinkospace/blinko:1.8.7
container_name: ${CONTAINER_NAME}
restart: always
networks:

View File

@@ -2,49 +2,49 @@ additionalProperties:
formFields:
- default: 8317
edit: true
envKey: PANEL_APP_PORT_8317
labelEn: Port 8317
labelZh: 端口 8317
envKey: PANEL_APP_PORT_HTTP
labelEn: Web UI Port
labelZh: Web界面端口
required: true
rule: paramPort
type: number
- default: 8085
edit: true
envKey: PANEL_APP_PORT_8085
labelEn: Port 8085
labelZh: 端口 8085
envKey: PANEL_APP_PORT_PROXY
labelEn: Proxy Port
labelZh: 代理端口
required: true
rule: paramPort
type: number
- default: 1455
edit: true
envKey: PANEL_APP_PORT_1455
labelEn: Port 1455
labelZh: 端口 1455
labelEn: Additional Port 1455
labelZh: 额外端口 1455
required: true
rule: paramPort
type: number
- default: 54545
edit: true
envKey: PANEL_APP_PORT_54545
labelEn: Port 54545
labelZh: 端口 54545
labelEn: Additional Port 54545
labelZh: 额外端口 54545
required: true
rule: paramPort
type: number
- default: 51121
edit: true
envKey: PANEL_APP_PORT_51121
labelEn: Port 51121
labelZh: 端口 51121
labelEn: Additional Port 51121
labelZh: 额外端口 51121
required: true
rule: paramPort
type: number
- default: 11451
edit: true
envKey: PANEL_APP_PORT_11451
labelEn: Port 11451
labelZh: 端口 11451
labelEn: Additional Port 11451
labelZh: 额外端口 11451
required: true
rule: paramPort
type: number
@@ -54,4 +54,4 @@ additionalProperties:
labelEn: Time Zone
labelZh: 时区
required: true
type: text
type: text

View File

@@ -1,17 +1,17 @@
services:
cliproxyapi-plus:
image: eceasy/cli-proxy-api-plus:v6.9.5-0
image: eceasy/cli-proxy-api-plus:v6.9.9-0
container_name: ${CONTAINER_NAME}
restart: always
networks:
- 1panel-network
ports:
- ${PANEL_APP_PORT_8317}:8317
- ${PANEL_APP_PORT_8085}:8085
- ${PANEL_APP_PORT_1455}:1455
- ${PANEL_APP_PORT_54545}:54545
- ${PANEL_APP_PORT_51121}:51121
- ${PANEL_APP_PORT_11451}:11451
- "${PANEL_APP_PORT_HTTP}:8317"
- "${PANEL_APP_PORT_PROXY}:8085"
- "${PANEL_APP_PORT_1455}:1455"
- "${PANEL_APP_PORT_54545}:54545"
- "${PANEL_APP_PORT_51121}:51121"
- "${PANEL_APP_PORT_11451}:11451"
volumes:
- ./data/config.yaml:/CLIProxyAPI/config.yaml
- ./data/auths:/root/.cli-proxy-api
@@ -23,4 +23,4 @@ services:
networks:
1panel-network:
external: true
external: true

View File

@@ -1,29 +1,105 @@
# CLIProxyAPI Plus
CLIProxyAPI Plus 代理API服务基于主线项目添加第三方提供商支持。
CLIProxyAPI Plus 是 CLIProxyAPI 的增强版本,在主线项目基础上添加第三方提供商支持。所有第三方提供商支持由社区贡献者维护。
## 功能特点
- 支持多种代理协议
- 支持第三方提供商
- 高性能设计
- 易于部署和使用
- 支持多种 AI 模型提供商Claude、Gemini、Codex、Qwen 等)
- 支持第三方提供商扩展
- OAuth 认证支持
- 高性能代理设计
- Web 管理界面
- 灵活的路由和负载均衡策略
## 使用说明
### 默认端口
- Web 界面: 8317
- API 端口: 8085
- 其他端口: 1455, 54545, 51121, 11451
- **Web UI 端口 (8317)**: 主要的 Web 管理界面和 API 端口
- Web 管理界面: `http://localhost:8317/management.html`
- API 端点: `http://localhost:8317/v1`
- **代理端口 (8085)**: 代理服务端口
- **额外端口**: 1455, 54545, 51121, 11451用于特定功能扩展
### 数据目录
### Web 管理界面
部署后,访问 Web 管理界面需要以下步骤:
1. **编辑配置文件** `./data/config.yaml`
-`remote-management.allow-remote` 设置为 `true` 以允许远程访问
- 设置 `remote-management.secret-key` 为您的管理密钥
2. **访问地址**(替换为您的服务器 IP
```
http://your-server-ip:8317/management.html
```
**注意**:默认 `allow-remote` 为 `false`,仅允许本地访问。如需从其他机器访问,请务必设置为 `true` 并配置强密码。
### 配置文件
应用数据存储在 `./data` 目录,包含:
- `config.yaml` - 配置文件
- `auths/` - 认证信息目录
- `logs/` - 日志目录
- `config.yaml` - 主配置文件
- API 密钥配置
- 提供商设置
- 路由策略
- 代理设置
- `auths/` - OAuth 认证信息存储目录
- `logs/` - 应用日志目录
### 快速配置
1. 编辑 `./data/config.yaml` 文件
2. 在 `api-keys` 部分添加您的 API 密钥
3. 如需远程访问,设置 `remote-management.allow-remote: true` 和 `remote-management.secret-key`
4. 根据需要配置各个提供商Claude、Gemini、Codex 等)
5. 重启应用使配置生效
### 主要配置项
```yaml
# API 密钥
api-keys:
- 'your-api-key-1'
- 'your-api-key-2'
# 管理界面设置
remote-management:
allow-remote: false # 是否允许远程管理true=允许false=仅本地
secret-key: '' # 管理密钥(首次启动后会被哈希)
disable-control-panel: false
# 代理设置
proxy-url: "" # 全局代理 URL
# 路由策略
routing:
strategy: 'round-robin' # round-robin 或 fill-first
```
## 版本说明
- **latest**: 最新开发版本
- **6.9.9-0**: 最新稳定版本(推荐)
## 相关链接
- GitHub: https://github.com/router-for-me/CLIProxyAPIPlus
- 官方文档: https://help.router-for.me/cn/introduction/quick-start.html
- Web UI 文档: https://help.router-for.me/cn/management/webui.html
- GitHub: https://github.com/router-for-me/CLIProxyAPIPlus
- 问题反馈: https://github.com/router-for-me/CLIProxyAPIPlus/issues
## 注意事项
1. 首次部署后请及时修改 `api-keys` 和管理密钥
2. 如需远程访问,请设置 `allow-remote: true` 并配置强密码
3. 生产环境建议在使用完毕后将 `allow-remote` 改回 `false` 以提高安全性
4. 如需使用 OAuth 认证,请确保 `auths/` 目录有正确的读写权限
5. 生产环境建议配置 TLS 加密
## 技术支持
- 主线项目问题: 请在主线仓库提交 Issue
- Plus 版本第三方提供商问题: 请联系相应的社区维护者

View File

@@ -1,29 +1,105 @@
# CLIProxyAPI Plus
CLIProxyAPI Plus proxy API service with third-party provider support based on the mainline project.
CLIProxyAPI Plus is an enhanced version of CLIProxyAPI, adding support for third-party providers on top of the mainline project. All third-party provider support is maintained by community contributors.
## Features
- Support multiple proxy protocols
- Support third-party providers
- High performance design
- Easy to deploy and use
- Support for multiple AI model providers (Claude, Gemini, Codex, Qwen, etc.)
- Third-party provider extensions
- OAuth authentication support
- High-performance proxy design
- Web management interface
- Flexible routing and load balancing strategies
## Usage
### Default Ports
- Web UI: 8317
- API Port: 8085
- Other Ports: 1455, 54545, 51121, 11451
- **Web UI Port (8317)**: Primary Web management interface and API port
- Web Management UI: `http://localhost:8317/management.html`
- API Endpoint: `http://localhost:8317/v1`
- **Proxy Port (8085)**: Proxy service port
- **Additional Ports**: 1455, 54545, 51121, 11451 (for specific feature extensions)
### Data Directory
### Web Management Interface
To access the Web management interface after deployment:
1. **Edit config file** `./data/config.yaml`
- Set `remote-management.allow-remote` to `true` to allow remote access
- Set `remote-management.secret-key` to your management secret
2. **Access URL** (replace with your server IP):
```
http://your-server-ip:8317/management.html
```
**Note**: Default `allow-remote` is `false`, only local access is allowed. To access from other machines, please set to `true` and configure a strong password.
### Configuration Files
Application data is stored in the `./data` directory:
- `config.yaml` - Configuration file
- `auths/` - Authentication directory
- `logs/` - Logs directory
- `config.yaml` - Main configuration file
- API key configuration
- Provider settings
- Routing strategy
- Proxy settings
- `auths/` - OAuth authentication storage directory
- `logs/` - Application logs directory
### Quick Configuration
1. Edit the `./data/config.yaml` file
2. Add your API keys in the `api-keys` section
3. For remote access, set `remote-management.allow-remote: true` and `remote-management.secret-key`
4. Configure providers as needed (Claude, Gemini, Codex, etc.)
5. Restart the application for changes to take effect
### Key Configuration Items
```yaml
# API Keys
api-keys:
- 'your-api-key-1'
- 'your-api-key-2'
# Management interface settings
remote-management:
allow-remote: false # Allow remote management, true=allow, false=local only
secret-key: '' # Management key (will be hashed after first startup)
disable-control-panel: false
# Proxy settings
proxy-url: "" # Global proxy URL
# Routing strategy
routing:
strategy: 'round-robin' # round-robin or fill-first
```
## Version Information
- **latest**: Latest development version
- **6.9.9-0**: Latest stable version (recommended)
## Links
- GitHub: https://github.com/router-for-me/CLIProxyAPIPlus
- Official Documentation: https://help.router-for.me/cn/introduction/quick-start.html
- Web UI Documentation: https://help.router-for.me/cn/management/webui.html
- GitHub: https://github.com/router-for-me/CLIProxyAPIPlus
- Issue Tracker: https://github.com/router-for-me/CLIProxyAPIPlus/issues
## Important Notes
1. Please modify `api-keys` and management key promptly after first deployment
2. For remote access, set `allow-remote: true` and configure a strong password
3. In production, it's recommended to set `allow-remote` back to `false` after use for better security
4. For OAuth authentication, ensure the `auths/` directory has proper read/write permissions
5. TLS encryption is recommended for production environments
## Support
- Mainline project issues: Please submit issues to the mainline repository
- Plus version third-party provider issues: Please contact the corresponding community maintainer

View File

@@ -18,7 +18,7 @@ additionalProperties:
recommend: 0
website: https://github.com/router-for-me/CLIProxyAPIPlus
github: https://github.com/router-for-me/CLIProxyAPIPlus
document: https://github.com/router-for-me/CLIProxyAPIPlus
document: https://help.router-for.me/cn/introduction/quick-start.html
architectures:
- amd64
- arm64
- arm64

View File

@@ -2,49 +2,49 @@ additionalProperties:
formFields:
- default: 8317
edit: true
envKey: PANEL_APP_PORT_8317
labelEn: Port 8317
labelZh: 端口 8317
envKey: PANEL_APP_PORT_HTTP
labelEn: Web UI Port
labelZh: Web界面端口
required: true
rule: paramPort
type: number
- default: 8085
edit: true
envKey: PANEL_APP_PORT_8085
labelEn: Port 8085
labelZh: 端口 8085
envKey: PANEL_APP_PORT_PROXY
labelEn: Proxy Port
labelZh: 代理端口
required: true
rule: paramPort
type: number
- default: 1455
edit: true
envKey: PANEL_APP_PORT_1455
labelEn: Port 1455
labelZh: 端口 1455
labelEn: Additional Port 1455
labelZh: 额外端口 1455
required: true
rule: paramPort
type: number
- default: 54545
edit: true
envKey: PANEL_APP_PORT_54545
labelEn: Port 54545
labelZh: 端口 54545
labelEn: Additional Port 54545
labelZh: 额外端口 54545
required: true
rule: paramPort
type: number
- default: 51121
edit: true
envKey: PANEL_APP_PORT_51121
labelEn: Port 51121
labelZh: 端口 51121
labelEn: Additional Port 51121
labelZh: 额外端口 51121
required: true
rule: paramPort
type: number
- default: 11451
edit: true
envKey: PANEL_APP_PORT_11451
labelEn: Port 11451
labelZh: 端口 11451
labelEn: Additional Port 11451
labelZh: 额外端口 11451
required: true
rule: paramPort
type: number

View File

@@ -6,12 +6,12 @@ services:
networks:
- 1panel-network
ports:
- ${PANEL_APP_PORT_8317}:8317
- ${PANEL_APP_PORT_8085}:8085
- ${PANEL_APP_PORT_1455}:1455
- ${PANEL_APP_PORT_54545}:54545
- ${PANEL_APP_PORT_51121}:51121
- ${PANEL_APP_PORT_11451}:11451
- "${PANEL_APP_PORT_HTTP}:8317"
- "${PANEL_APP_PORT_PROXY}:8085"
- "${PANEL_APP_PORT_1455}:1455"
- "${PANEL_APP_PORT_54545}:54545"
- "${PANEL_APP_PORT_51121}:51121"
- "${PANEL_APP_PORT_11451}:11451"
volumes:
- ./data/config.yaml:/CLIProxyAPI/config.yaml
- ./data/auths:/root/.cli-proxy-api

View File

@@ -0,0 +1,29 @@
additionalProperties:
formFields:
- default: 9100
edit: true
envKey: PANEL_APP_PORT_HTTP
labelEn: Web Port
labelZh: Web端口
required: true
rule: paramPort
type: number
label:
en: Web Port
zh: Web端口
ja: Webポート
ko: Web 포트
- default: "Craft-Agents-"
edit: true
envKey: CRAFT_SERVER_TOKEN
labelEn: Server Token
labelZh: 服务器令牌
random: true
required: true
rule: paramComplexity
type: password
label:
en: Server Token
zh: 服务器令牌
ja: サーバートークン
ko: 서버 토큰

View File

@@ -0,0 +1,28 @@
services:
craft-agents:
image: ghcr.io/lukilabs/craft-agents-server:0.8.5
container_name: ${CONTAINER_NAME}
restart: always
networks:
- 1panel-network
ports:
- "${PANEL_APP_PORT_HTTP}:9100"
volumes:
- craft-agents-data:/home/craftagents/.craft-agent
environment:
- TZ=Asia/Shanghai
- CRAFT_SERVER_TOKEN=${CRAFT_SERVER_TOKEN}
- CRAFT_RPC_HOST=0.0.0.0
command:
- bun
- run
- packages/server/src/index.ts
- --allow-insecure-bind
labels:
createdBy: "Apps"
volumes:
craft-agents-data:
driver: local
networks:
1panel-network:
external: true

226
apps/craft-agents/README.md Normal file
View File

@@ -0,0 +1,226 @@
# Craft Agents
Craft Agents 是一个强大的 AI Agent 工作空间,支持多种 LLM 提供商和 MCP 集成。
## 功能特点
- **多 LLM 提供商支持**:支持 Anthropic、Google AI Studio、ChatGPT Plus、GitHub Copilot 等多种 AI 提供商
- **MCP 集成**:支持连接 MCP 服务器、REST API 和本地文件系统
- **多会话管理**:具有收件箱/归档功能,支持会话标记和状态工作流
- **权限模式**:三级权限系统(探索、询问编辑、自动),可自定义规则
- **动态状态系统**:可自定义会话工作流状态(待办、进行中、完成等)
- **自动化**:支持事件驱动的自动化,可基于标签变化、计划任务、工具使用等触发
- **无头服务器模式**:可作为远程服务器运行,桌面应用作为瘦客户端连接
- **Web UI**:内置 Web 界面,可通过浏览器访问和管理
## 使用说明
### 默认端口
- Web 界面/RPC 端口: 9100
### 配置说明
#### 必需参数
- **服务器令牌 (CRAFT_SERVER_TOKEN)**:用于客户端认证的 Bearer 令牌,系统会自动生成格式为 `Craft-Agents-<随机复杂密码>` 的安全令牌,您也可以自定义
#### 可选参数
- **Web 端口 (PANEL_APP_PORT_HTTP)**Web界面访问端口默认为 `9100`
#### 安全说明
⚠️ **重要提示**:本应用默认使用 `--allow-insecure-bind` 参数启动,允许在内网环境下使用非加密的 `ws://` 协议。这适用于以下场景:
- ✅ **内网环境**:应用运行在受信任的内网环境中
- ✅ **反向代理**:通过 Nginx/Caddy 等反向代理处理 TLS
- ❌ **公网直接暴露**:不推荐直接暴露到公网
**生产环境建议**
- 使用反向代理(如 Nginx、Caddy处理 TLS 加密
- 或在容器内配置 TLS 证书(设置环境变量 `CRAFT_RPC_TLS_CERT``CRAFT_RPC_TLS_KEY`),并移除 `--allow-insecure-bind` 参数
**数据存储**应用数据存储在Docker命名卷中由Docker自动管理权限无需手动配置。
### 连接方式
#### 通过 Web UI 访问
部署后,通过浏览器访问 `http://<服务器IP>:9100`,使用设置的服务器令牌登录。
#### 通过桌面应用连接
在 Craft Agents 桌面应用中,配置远程工作空间:
- URL: `ws://<服务器IP>:9100` 或 `wss://<服务器IP>:9100`(启用 TLS 时)
- Token: 部署时设置的服务器令牌
### 数据目录
应用数据存储在Docker命名卷 `craft-agents-data` 中,映射到容器内的 `/home/craftagents/.craft-agent` 目录,包括:
- 配置文件
- 会话数据
- 工作空间设置
- 技能和源配置
**数据管理**
- 数据卷由Docker自动管理无需手动设置权限
- 数据会持久化保存,即使容器删除也不会丢失
- 可以通过 `docker volume inspect craft-agents-data` 查看数据位置
### 安全访问方式
根据官方文档,推荐以下几种安全访问方式:
#### 方式1Tailscale推荐
Tailscale 创建设备间的私有网格网络,无需端口转发、证书或防火墙规则。
**优势**
- ✅ 无需配置TLS证书
- ✅ 端到端加密
- ✅ 服务器只能从您的Tailscale网络访问
**配置方法**
```yaml
environment:
- CRAFT_RPC_HOST=100.x.y.z # Tailscale IP
```
#### 方式2反向代理nginx, Caddy
标准的生产部署方式反向代理处理TLS终止和访问控制。
**Caddy 示例**自动HTTPS
```
craft.example.com {
reverse_proxy localhost:9100
}
```
**Nginx 示例**
```nginx
server {
listen 443 ssl;
server_name craft.example.com;
ssl_certificate /path/to/cert.pem;
ssl_certificate_key /path/to/key.pem;
location / {
proxy_pass http://localhost:9100;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
}
}
```
使用反向代理时,应用绑定到 localhost
```yaml
environment:
- CRAFT_RPC_HOST=127.0.0.1
```
#### 方式3Cloudflare Tunnel
无需开放端口或管理证书通过HTTPS暴露服务。
**快速隧道**即时HTTPS URL
```bash
cloudflared tunnel --url http://localhost:9100
```
会生成一个 `https://<random>.trycloudflare.com` URL。
**永久自定义域名**
```bash
# 一次性设置
cloudflared tunnel login
cloudflared tunnel create craft-agents
cloudflared tunnel route dns craft-agents agents.yourdomain.com
# 运行隧道
cloudflared tunnel run --url http://localhost:9100 craft-agents
```
#### 方式4SSH隧道
快速临时访问,无需任何设置:
```bash
# 在客户端转发本地端口9100到远程服务器
ssh -L 9100:localhost:9100 user@your-server
```
然后从桌面应用或浏览器连接到 `ws://localhost:9100`
#### 方式5直接配置TLS证书
如需直接在应用中启用TLS需要
1. **生成证书**(参考官方文档):
```bash
# 使用官方脚本生成开发证书
./scripts/generate-dev-cert.sh
```
2. **修改 docker-compose.yml**
```yaml
volumes:
- craft-agents-data:/home/craftagents/.craft-agent
- ./certs:/certs:ro # 挂载证书目录
environment:
- CRAFT_SERVER_TOKEN=${CRAFT_SERVER_TOKEN}
- CRAFT_RPC_HOST=0.0.0.0
- CRAFT_RPC_TLS_CERT=/certs/cert.pem
- CRAFT_RPC_TLS_KEY=/certs/key.pem
```
3. **移除 `--allow-insecure-bind` 参数**
#### 访问方式
- **启用TLS后**
- Web UI: `https://192.168.123.201:9100`
- 桌面客户端: `wss://192.168.123.201:9100`
- **不启用TLS仅内网测试**
- Web UI: `http://192.168.123.201:9100`
- 桌面客户端: 可能无法连接浏览器API限制
**推荐顺序**
1. Tailscale最简单安全
2. 反向代理(标准生产方案)
3. Cloudflare Tunnel无需端口转发
4. SSH隧道临时访问
5. 直接TLS配置不推荐
## 支持的 LLM 提供商
### 直接连接
- **Anthropic**API 密钥或 Claude Max/Pro OAuth
- **Google AI Studio**API 密钥
- **ChatGPT Plus / Pro**Codex OAuth
- **GitHub Copilot**OAuth设备代码
### 第三方提供商
通过自定义端点支持:
- OpenRouter
- Vercel AI Gateway
- Ollama本地模型
- 其他 OpenAI 兼容端点
## 相关链接
- 官方网站: https://agents.craft.do
- GitHub: https://github.com/lukilabs/craft-agents-oss
- 文档: https://github.com/lukilabs/craft-agents-oss#readme
## 许可证
Apache License 2.0

View File

@@ -0,0 +1,31 @@
name: Craft Agents
tags:
- 开发工具
- AI助手
title: AI Agent工作空间支持多LLM提供商和MCP集成
description: AI Agent工作空间支持多LLM提供商和MCP集成
additionalProperties:
key: craft-agents
name: Craft Agents
tags:
- DevTool
- AI
shortDescZh: AI Agent工作空间支持多LLM提供商和MCP集成
shortDescEn: AI Agent workspace with multi-LLM provider support and MCP integration
type: website
crossVersionUpdate: true
limit: 0
recommend: 0
website: https://agents.craft.do
github: https://github.com/lukilabs/craft-agents-oss
document: https://github.com/lukilabs/craft-agents-oss#readme
architectures:
- amd64
- arm64
description:
en: Craft Agents is an AI agent workspace that supports multiple LLM providers (Anthropic, Google AI Studio, ChatGPT Plus, GitHub Copilot) and MCP integration. It features multi-session management, dynamic status workflow, and can run as a headless server.
zh: Craft Agents是一个AI Agent工作空间支持多种LLM提供商Anthropic、Google AI Studio、ChatGPT Plus、GitHub Copilot和MCP集成。它具有多会话管理、动态状态工作流等功能可以作为无头服务器运行。
zh-Hant: Craft Agents是一個AI Agent工作空間支持多種LLM提供商Anthropic、Google AI Studio、ChatGPT Plus、GitHub Copilot和MCP集成。它具有多會話管理、動態狀態工作流等功能可以作為無頭服務器運行。
ja: Craft Agentsは、複数のLLMプロバイダーAnthropic、Google AI Studio、ChatGPT Plus、GitHub CopilotとMCP統合をサポートするAIエージェントワークスペースです。マルチセッション管理、動的ステータスワークフローなどの機能を備え、ヘッドレスサーバーとして実行できます。
ko: Craft Agents는 여러 LLM 제공자(Anthropic, Google AI Studio, ChatGPT Plus, GitHub Copilot)와 MCP 통합을 지원하는 AI 에이전트 워크스페이스입니다. 다중 세션 관리, 동적 상태 워크플로 등의 기능을 갖추고 있으며 헤드리스 서버로 실행할 수 있습니다.
memoryRequired: 512

View File

@@ -0,0 +1,29 @@
additionalProperties:
formFields:
- default: 9100
edit: true
envKey: PANEL_APP_PORT_HTTP
labelEn: Web Port
labelZh: Web端口
required: true
rule: paramPort
type: number
label:
en: Web Port
zh: Web端口
ja: Webポート
ko: Web 포트
- default: "Craft-Agents-"
edit: true
envKey: CRAFT_SERVER_TOKEN
labelEn: Server Token
labelZh: 服务器令牌
random: true
required: true
rule: paramComplexity
type: password
label:
en: Server Token
zh: 服务器令牌
ja: サーバートークン
ko: 서버 토큰

View File

@@ -0,0 +1,28 @@
services:
craft-agents:
image: ghcr.io/lukilabs/craft-agents-server:latest
container_name: ${CONTAINER_NAME}
restart: always
networks:
- 1panel-network
ports:
- "${PANEL_APP_PORT_HTTP}:9100"
volumes:
- craft-agents-data:/home/craftagents/.craft-agent
environment:
- TZ=Asia/Shanghai
- CRAFT_SERVER_TOKEN=${CRAFT_SERVER_TOKEN}
- CRAFT_RPC_HOST=0.0.0.0
command:
- bun
- run
- packages/server/src/index.ts
- --allow-insecure-bind
labels:
createdBy: "Apps"
volumes:
craft-agents-data:
driver: local
networks:
1panel-network:
external: true

BIN
apps/craft-agents/logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 22 KiB

View File

@@ -11,7 +11,7 @@ services:
APP_SECRET: 52f235dee223c92a83a934ada13b83075c9855fe966b3cbf9dd86810e2b742ee
DATABASE_URL: postgresql://docmost:${PANEL_DB_USER_PASSWORD}@db:5432/docmost?schema=public
REDIS_URL: redis://redis:6379
image: docmost/docmost:0.71.0
image: docmost/docmost:0.71.1
labels:
createdBy: Apps
depends_on:

View File

@@ -1,6 +1,6 @@
services:
easytier:
image: easytier/easytier:v2.5.0
image: easytier/easytier:v2.6.0
container_name: ${CONTAINER_NAME}
restart: always
network_mode: host

View File

@@ -1,6 +1,6 @@
services:
gpt4free:
image: hlohaus789/g4f:v7.3.5-slim
image: hlohaus789/g4f:v7.4.7-slim
container_name: ${CONTAINER_NAME}
restart: always
networks:

View File

@@ -1,6 +1,6 @@
services:
gpt4free:
image: hlohaus789/g4f:v7.3.5
image: hlohaus789/g4f:v7.4.7
container_name: ${CONTAINER_NAME}
restart: always
networks:

View File

@@ -1,6 +1,6 @@
services:
inspector:
image: ghcr.io/modelcontextprotocol/inspector:0.21.1
image: ghcr.io/modelcontextprotocol/inspector:0.21.2
container_name: ${CONTAINER_NAME}
restart: always
networks:

View File

@@ -1,6 +1,6 @@
services:
langflow:
image: langflowai/langflow:1.8.3
image: langflowai/langflow:1.8.4
container_name: ${CONTAINER_NAME}
pull_policy: always
user: root

444
apps/litellm/README.md Normal file
View File

@@ -0,0 +1,444 @@
<h1 align="center">
🚅 LiteLLM
</h1>
<p align="center">
<p align="center">
<a href="https://render.com/deploy?repo=https://github.com/BerriAI/litellm" target="_blank" rel="nofollow"><img src="https://render.com/images/deploy-to-render-button.svg" alt="Deploy to Render"></a>
<a href="https://railway.app/template/HLP0Ub?referralCode=jch2ME">
<img src="https://railway.app/button.svg" alt="Deploy on Railway">
</a>
</p>
<p align="center">Call all LLM APIs using the OpenAI format [Bedrock, Huggingface, VertexAI, TogetherAI, Azure, OpenAI, Groq etc.]
<br>
</p>
<h4 align="center"><a href="https://docs.litellm.ai/docs/simple_proxy" target="_blank">LiteLLM Proxy Server (LLM Gateway)</a> | <a href="https://docs.litellm.ai/docs/hosted" target="_blank"> Hosted Proxy (Preview)</a> | <a href="https://docs.litellm.ai/docs/enterprise"target="_blank">Enterprise Tier</a></h4>
<h4 align="center">
<a href="https://pypi.org/project/litellm/" target="_blank">
<img src="https://img.shields.io/pypi/v/litellm.svg" alt="PyPI Version">
</a>
<a href="https://www.ycombinator.com/companies/berriai">
<img src="https://img.shields.io/badge/Y%20Combinator-W23-orange?style=flat-square" alt="Y Combinator W23">
</a>
<a href="https://wa.link/huol9n">
<img src="https://img.shields.io/static/v1?label=Chat%20on&message=WhatsApp&color=success&logo=WhatsApp&style=flat-square" alt="Whatsapp">
</a>
<a href="https://discord.gg/wuPM9dRgDw">
<img src="https://img.shields.io/static/v1?label=Chat%20on&message=Discord&color=blue&logo=Discord&style=flat-square" alt="Discord">
</a>
<a href="https://www.litellm.ai/support">
<img src="https://img.shields.io/static/v1?label=Chat%20on&message=Slack&color=black&logo=Slack&style=flat-square" alt="Slack">
</a>
</h4>
LiteLLM manages:
- Translate inputs to provider's `completion`, `embedding`, and `image_generation` endpoints
- [Consistent output](https://docs.litellm.ai/docs/completion/output), text responses will always be available at `['choices'][0]['message']['content']`
- Retry/fallback logic across multiple deployments (e.g. Azure/OpenAI) - [Router](https://docs.litellm.ai/docs/routing)
- Set Budgets & Rate limits per project, api key, model [LiteLLM Proxy Server (LLM Gateway)](https://docs.litellm.ai/docs/simple_proxy)
[**Jump to LiteLLM Proxy (LLM Gateway) Docs**](https://github.com/BerriAI/litellm?tab=readme-ov-file#litellm-proxy-server-llm-gateway---docs) <br>
[**Jump to Supported LLM Providers**](https://github.com/BerriAI/litellm?tab=readme-ov-file#supported-providers-docs)
🚨 **Stable Release:** Use docker images with the `-stable` tag. These have undergone 12 hour load tests, before being published. [More information about the release cycle here](https://docs.litellm.ai/docs/proxy/release_cycle)
Support for more providers. Missing a provider or LLM Platform, raise a [feature request](https://github.com/BerriAI/litellm/issues/new?assignees=&labels=enhancement&projects=&template=feature_request.yml&title=%5BFeature%5D%3A+).
# Usage ([**Docs**](https://docs.litellm.ai/docs/))
> [!IMPORTANT]
> LiteLLM v1.0.0 now requires `openai>=1.0.0`. Migration guide [here](https://docs.litellm.ai/docs/migration)
> LiteLLM v1.40.14+ now requires `pydantic>=2.0.0`. No changes required.
<a target="_blank" href="https://colab.research.google.com/github/BerriAI/litellm/blob/main/cookbook/liteLLM_Getting_Started.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
```shell
pip install litellm
```
```python
from litellm import completion
import os
## set ENV variables
os.environ["OPENAI_API_KEY"] = "your-openai-key"
os.environ["ANTHROPIC_API_KEY"] = "your-anthropic-key"
messages = [{ "content": "Hello, how are you?","role": "user"}]
# openai call
response = completion(model="openai/gpt-4o", messages=messages)
# anthropic call
response = completion(model="anthropic/claude-sonnet-4-20250514", messages=messages)
print(response)
```
### Response (OpenAI Format)
```json
{
"id": "chatcmpl-1214900a-6cdd-4148-b663-b5e2f642b4de",
"created": 1751494488,
"model": "claude-sonnet-4-20250514",
"object": "chat.completion",
"system_fingerprint": null,
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": {
"content": "Hello! I'm doing well, thank you for asking. I'm here and ready to help with whatever you'd like to discuss or work on. How are you doing today?",
"role": "assistant",
"tool_calls": null,
"function_call": null
}
}
],
"usage": {
"completion_tokens": 39,
"prompt_tokens": 13,
"total_tokens": 52,
"completion_tokens_details": null,
"prompt_tokens_details": {
"audio_tokens": null,
"cached_tokens": 0
},
"cache_creation_input_tokens": 0,
"cache_read_input_tokens": 0
}
}
```
Call any model supported by a provider, with `model=<provider_name>/<model_name>`. There might be provider-specific details here, so refer to [provider docs for more information](https://docs.litellm.ai/docs/providers)
## Async ([Docs](https://docs.litellm.ai/docs/completion/stream#async-completion))
```python
from litellm import acompletion
import asyncio
async def test_get_response():
user_message = "Hello, how are you?"
messages = [{"content": user_message, "role": "user"}]
response = await acompletion(model="openai/gpt-4o", messages=messages)
return response
response = asyncio.run(test_get_response())
print(response)
```
## Streaming ([Docs](https://docs.litellm.ai/docs/completion/stream))
liteLLM supports streaming the model response back, pass `stream=True` to get a streaming iterator in response.
Streaming is supported for all models (Bedrock, Huggingface, TogetherAI, Azure, OpenAI, etc.)
```python
from litellm import completion
response = completion(model="openai/gpt-4o", messages=messages, stream=True)
for part in response:
print(part.choices[0].delta.content or "")
# claude sonnet 4
response = completion('anthropic/claude-sonnet-4-20250514', messages, stream=True)
for part in response:
print(part)
```
### Response chunk (OpenAI Format)
```json
{
"id": "chatcmpl-fe575c37-5004-4926-ae5e-bfbc31f356ca",
"created": 1751494808,
"model": "claude-sonnet-4-20250514",
"object": "chat.completion.chunk",
"system_fingerprint": null,
"choices": [
{
"finish_reason": null,
"index": 0,
"delta": {
"provider_specific_fields": null,
"content": "Hello",
"role": "assistant",
"function_call": null,
"tool_calls": null,
"audio": null
},
"logprobs": null
}
],
"provider_specific_fields": null,
"stream_options": null,
"citations": null
}
```
## Logging Observability ([Docs](https://docs.litellm.ai/docs/observability/callbacks))
LiteLLM exposes pre defined callbacks to send data to Lunary, MLflow, Langfuse, DynamoDB, s3 Buckets, Helicone, Promptlayer, Traceloop, Athina, Slack
```python
from litellm import completion
## set env variables for logging tools (when using MLflow, no API key set up is required)
os.environ["LUNARY_PUBLIC_KEY"] = "your-lunary-public-key"
os.environ["HELICONE_API_KEY"] = "your-helicone-auth-key"
os.environ["LANGFUSE_PUBLIC_KEY"] = ""
os.environ["LANGFUSE_SECRET_KEY"] = ""
os.environ["ATHINA_API_KEY"] = "your-athina-api-key"
os.environ["OPENAI_API_KEY"] = "your-openai-key"
# set callbacks
litellm.success_callback = ["lunary", "mlflow", "langfuse", "athina", "helicone"] # log input/output to lunary, langfuse, supabase, athina, helicone etc
#openai call
response = completion(model="openai/gpt-4o", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
```
# LiteLLM Proxy Server (LLM Gateway) - ([Docs](https://docs.litellm.ai/docs/simple_proxy))
Track spend + Load Balance across multiple projects
[Hosted Proxy (Preview)](https://docs.litellm.ai/docs/hosted)
The proxy provides:
1. [Hooks for auth](https://docs.litellm.ai/docs/proxy/virtual_keys#custom-auth)
2. [Hooks for logging](https://docs.litellm.ai/docs/proxy/logging#step-1---create-your-custom-litellm-callback-class)
3. [Cost tracking](https://docs.litellm.ai/docs/proxy/virtual_keys#tracking-spend)
4. [Rate Limiting](https://docs.litellm.ai/docs/proxy/users#set-rate-limits)
## 📖 Proxy Endpoints - [Swagger Docs](https://litellm-api.up.railway.app/)
## Quick Start Proxy - CLI
```shell
pip install 'litellm[proxy]'
```
### Step 1: Start litellm proxy
```shell
$ litellm --model huggingface/bigcode/starcoder
#INFO: Proxy running on http://0.0.0.0:4000
```
### Step 2: Make ChatCompletions Request to Proxy
> [!IMPORTANT]
> 💡 [Use LiteLLM Proxy with Langchain (Python, JS), OpenAI SDK (Python, JS) Anthropic SDK, Mistral SDK, LlamaIndex, Instructor, Curl](https://docs.litellm.ai/docs/proxy/user_keys)
```python
import openai # openai v1.0.0+
client = openai.OpenAI(api_key="anything",base_url="http://0.0.0.0:4000") # set proxy to base_url
# request sent to model set on litellm proxy, `litellm --model`
response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
{
"role": "user",
"content": "this is a test request, write a short poem"
}
])
print(response)
```
## Proxy Key Management ([Docs](https://docs.litellm.ai/docs/proxy/virtual_keys))
Connect the proxy with a Postgres DB to create proxy keys
```bash
# Get the code
git clone https://github.com/BerriAI/litellm
# Go to folder
cd litellm
# Add the master key - you can change this after setup
echo 'LITELLM_MASTER_KEY="sk-1234"' > .env
# Add the litellm salt key - you cannot change this after adding a model
# It is used to encrypt / decrypt your LLM API Key credentials
# We recommend - https://1password.com/password-generator/
# password generator to get a random hash for litellm salt key
echo 'LITELLM_SALT_KEY="sk-1234"' >> .env
source .env
# Start
docker compose up
```
UI on `/ui` on your proxy server
![ui_3](https://github.com/BerriAI/litellm/assets/29436595/47c97d5e-b9be-4839-b28c-43d7f4f10033)
Set budgets and rate limits across multiple projects
`POST /key/generate`
### Request
```shell
curl 'http://0.0.0.0:4000/key/generate' \
--header 'Authorization: Bearer sk-1234' \
--header 'Content-Type: application/json' \
--data-raw '{"models": ["gpt-3.5-turbo", "gpt-4", "claude-2"], "duration": "20m","metadata": {"user": "ishaan@berri.ai", "team": "core-infra"}}'
```
### Expected Response
```shell
{
"key": "sk-kdEXbIqZRwEeEiHwdg7sFA", # Bearer token
"expires": "2023-11-19T01:38:25.838000+00:00" # datetime object
}
```
## Supported Providers ([Docs](https://docs.litellm.ai/docs/providers))
| Provider | [Completion](https://docs.litellm.ai/docs/#basic-usage) | [Streaming](https://docs.litellm.ai/docs/completion/stream#streaming-responses) | [Async Completion](https://docs.litellm.ai/docs/completion/stream#async-completion) | [Async Streaming](https://docs.litellm.ai/docs/completion/stream#async-streaming) | [Async Embedding](https://docs.litellm.ai/docs/embedding/supported_embedding) | [Async Image Generation](https://docs.litellm.ai/docs/image_generation) |
|-------------------------------------------------------------------------------------|---------------------------------------------------------|---------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------|-------------------------------------------------------------------------------|-------------------------------------------------------------------------|
| [openai](https://docs.litellm.ai/docs/providers/openai) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [Meta - Llama API](https://docs.litellm.ai/docs/providers/meta_llama) | ✅ | ✅ | ✅ | ✅ | | |
| [azure](https://docs.litellm.ai/docs/providers/azure) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [AI/ML API](https://docs.litellm.ai/docs/providers/aiml) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [aws - sagemaker](https://docs.litellm.ai/docs/providers/aws_sagemaker) | ✅ | ✅ | ✅ | ✅ | ✅ | |
| [aws - bedrock](https://docs.litellm.ai/docs/providers/bedrock) | ✅ | ✅ | ✅ | ✅ | ✅ | |
| [google - vertex_ai](https://docs.litellm.ai/docs/providers/vertex) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| [google - palm](https://docs.litellm.ai/docs/providers/palm) | ✅ | ✅ | ✅ | ✅ | | |
| [google AI Studio - gemini](https://docs.litellm.ai/docs/providers/gemini) | ✅ | ✅ | ✅ | ✅ | | |
| [mistral ai api](https://docs.litellm.ai/docs/providers/mistral) | ✅ | ✅ | ✅ | ✅ | ✅ | |
| [cloudflare AI Workers](https://docs.litellm.ai/docs/providers/cloudflare_workers) | ✅ | ✅ | ✅ | ✅ | | |
| [CompactifAI](https://docs.litellm.ai/docs/providers/compactifai) | ✅ | ✅ | ✅ | ✅ | | |
| [cohere](https://docs.litellm.ai/docs/providers/cohere) | ✅ | ✅ | ✅ | ✅ | ✅ | |
| [anthropic](https://docs.litellm.ai/docs/providers/anthropic) | ✅ | ✅ | ✅ | ✅ | | |
| [empower](https://docs.litellm.ai/docs/providers/empower) | ✅ | ✅ | ✅ | ✅ |
| [huggingface](https://docs.litellm.ai/docs/providers/huggingface) | ✅ | ✅ | ✅ | ✅ | ✅ | |
| [replicate](https://docs.litellm.ai/docs/providers/replicate) | ✅ | ✅ | ✅ | ✅ | | |
| [together_ai](https://docs.litellm.ai/docs/providers/togetherai) | ✅ | ✅ | ✅ | ✅ | | |
| [openrouter](https://docs.litellm.ai/docs/providers/openrouter) | ✅ | ✅ | ✅ | ✅ | | |
| [ai21](https://docs.litellm.ai/docs/providers/ai21) | ✅ | ✅ | ✅ | ✅ | | |
| [baseten](https://docs.litellm.ai/docs/providers/baseten) | ✅ | ✅ | ✅ | ✅ | | |
| [vllm](https://docs.litellm.ai/docs/providers/vllm) | ✅ | ✅ | ✅ | ✅ | | |
| [nlp_cloud](https://docs.litellm.ai/docs/providers/nlp_cloud) | ✅ | ✅ | ✅ | ✅ | | |
| [aleph alpha](https://docs.litellm.ai/docs/providers/aleph_alpha) | ✅ | ✅ | ✅ | ✅ | | |
| [petals](https://docs.litellm.ai/docs/providers/petals) | ✅ | ✅ | ✅ | ✅ | | |
| [ollama](https://docs.litellm.ai/docs/providers/ollama) | ✅ | ✅ | ✅ | ✅ | ✅ | |
| [deepinfra](https://docs.litellm.ai/docs/providers/deepinfra) | ✅ | ✅ | ✅ | ✅ | | |
| [perplexity-ai](https://docs.litellm.ai/docs/providers/perplexity) | ✅ | ✅ | ✅ | ✅ | | |
| [Groq AI](https://docs.litellm.ai/docs/providers/groq) | ✅ | ✅ | ✅ | ✅ | | |
| [Deepseek](https://docs.litellm.ai/docs/providers/deepseek) | ✅ | ✅ | ✅ | ✅ | | |
| [anyscale](https://docs.litellm.ai/docs/providers/anyscale) | ✅ | ✅ | ✅ | ✅ | | |
| [IBM - watsonx.ai](https://docs.litellm.ai/docs/providers/watsonx) | ✅ | ✅ | ✅ | ✅ | ✅ | |
| [voyage ai](https://docs.litellm.ai/docs/providers/voyage) | | | | | ✅ | |
| [xinference [Xorbits Inference]](https://docs.litellm.ai/docs/providers/xinference) | | | | | ✅ | |
| [FriendliAI](https://docs.litellm.ai/docs/providers/friendliai) | ✅ | ✅ | ✅ | ✅ | | |
| [Galadriel](https://docs.litellm.ai/docs/providers/galadriel) | ✅ | ✅ | ✅ | ✅ | | |
| [GradientAI](https://docs.litellm.ai/docs/providers/gradient_ai) | ✅ | ✅ | | | | |
| [Novita AI](https://novita.ai/models/llm?utm_source=github_litellm&utm_medium=github_readme&utm_campaign=github_link) | ✅ | ✅ | ✅ | ✅ | | |
| [Featherless AI](https://docs.litellm.ai/docs/providers/featherless_ai) | ✅ | ✅ | ✅ | ✅ | | |
| [Nebius AI Studio](https://docs.litellm.ai/docs/providers/nebius) | ✅ | ✅ | ✅ | ✅ | ✅ | |
| [Heroku](https://docs.litellm.ai/docs/providers/heroku) | ✅ | ✅ | | | | |
| [OVHCloud AI Endpoints](https://docs.litellm.ai/docs/providers/ovhcloud) | ✅ | ✅ | | | | |
[**Read the Docs**](https://docs.litellm.ai/docs/)
## Run in Developer mode
### Services
1. Setup .env file in root
2. Run dependant services `docker-compose up db prometheus`
### Backend
1. (In root) create virtual environment `python -m venv .venv`
2. Activate virtual environment `source .venv/bin/activate`
3. Install dependencies `pip install -e ".[all]"`
4. Start proxy backend `python litellm/proxy_cli.py`
### Frontend
1. Navigate to `ui/litellm-dashboard`
2. Install dependencies `npm install`
3. Run `npm run dev` to start the dashboard
# Enterprise
For companies that need better security, user management and professional support
[Talk to founders](https://calendly.com/d/4mp-gd3-k5k/litellm-1-1-onboarding-chat)
This covers:
- **Features under the [LiteLLM Commercial License](https://docs.litellm.ai/docs/proxy/enterprise):**
- **Feature Prioritization**
- **Custom Integrations**
- **Professional Support - Dedicated discord + slack**
- **Custom SLAs**
- **Secure access with Single Sign-On**
# Contributing
We welcome contributions to LiteLLM! Whether you're fixing bugs, adding features, or improving documentation, we appreciate your help.
## Quick Start for Contributors
This requires poetry to be installed.
```bash
git clone https://github.com/BerriAI/litellm.git
cd litellm
make install-dev # Install development dependencies
make format # Format your code
make lint # Run all linting checks
make test-unit # Run unit tests
make format-check # Check formatting only
```
For detailed contributing guidelines, see [CONTRIBUTING.md](CONTRIBUTING.md).
## Code Quality / Linting
LiteLLM follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html).
Our automated checks include:
- **Black** for code formatting
- **Ruff** for linting and code quality
- **MyPy** for type checking
- **Circular import detection**
- **Import safety checks**
All these checks must pass before your PR can be merged.
# Support / talk with founders
- [Schedule Demo 👋](https://calendly.com/d/4mp-gd3-k5k/berriai-1-1-onboarding-litellm-hosted-version)
- [Community Discord 💭](https://discord.gg/wuPM9dRgDw)
- [Community Slack 💭](https://www.litellm.ai/support)
- Our numbers 📞 +1 (770) 8783-106 / +1 (412) 618-6238
- Our emails ✉️ ishaan@berri.ai / krrish@berri.ai
# Why did we build this
- **Need for simplicity**: Our code started to get extremely complicated managing & translating calls between Azure, OpenAI and Cohere.
# Contributors
<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->
<!-- prettier-ignore-start -->
<!-- markdownlint-disable -->
<!-- markdownlint-restore -->
<!-- prettier-ignore-end -->
<!-- ALL-CONTRIBUTORS-LIST:END -->
<a href="https://github.com/BerriAI/litellm/graphs/contributors">
<img src="https://contrib.rocks/image?repo=BerriAI/litellm" />
</a>

27
apps/litellm/data.yml Normal file
View File

@@ -0,0 +1,27 @@
# 1Panel app-store metadata for LiteLLM.
name: LiteLLM
tags:
  - 实用工具
  - AI
title: 使用 OpenAI 格式调用所有 LLM API [Bedrock、Huggingface、VertexAI、TogetherAI、Azure、OpenAI、Groq 等]
description:
  en: Call all LLM APIs using the OpenAI format [Bedrock, Huggingface, VertexAI, TogetherAI, Azure, OpenAI, Groq etc.]
  zh: 使用 OpenAI 格式调用所有 LLM API [Bedrock、Huggingface、VertexAI、TogetherAI、Azure、OpenAI、Groq 等]
additionalProperties:
  key: litellm
  name: LiteLLM
  tags:
    - Tool
    - AI
  shortDescZh: 使用 OpenAI 格式调用所有 LLM API [Bedrock、Huggingface、VertexAI、TogetherAI、Azure、OpenAI、Groq 等]
  shortDescEn: Call all LLM APIs using the OpenAI format [Bedrock, Huggingface, VertexAI, TogetherAI, Azure, OpenAI, Groq etc.]
  type: website
  crossVersionUpdate: true
  limit: 0
  website: https://github.com/BerriAI/litellm
  github: https://github.com/BerriAI/litellm
  document: https://docs.litellm.ai/docs/

View File

@@ -0,0 +1,21 @@
# Install-form fields for this LiteLLM version.
additionalProperties:
  formFields:
    # Host port for the proxy (container listens on 4000).
    - default: "4000"
      envKey: PANEL_APP_PORT_HTTP
      label:
        en: Port
        zh: 端口
      required: true
      type: number
      edit: true
      rule: paramPort
    # Master/admin key for the proxy; 1Panel randomizes the value at install.
    - default: sk-litellm-change-in-production
      envKey: LITELLM_MASTER_KEY
      label:
        en: LITELLM_MASTER_KEY
        zh: LITELLM密钥
      required: true
      type: password
      edit: true
      rule: paramComplexity
      random: true

View File

View File

@@ -0,0 +1,79 @@
services:
  litellm:
    image: ghcr.io/berriai/litellm:main-stable
    container_name: ${CONTAINER_NAME}
    # Fix: was missing; db and prometheus already restart automatically.
    restart: always
    #########################################
    ## Uncomment these lines to start proxy with a config.yaml file ##
    # volumes:
    #   - ./config.yaml:/app/config.yaml
    # command:
    #   - "--config=/app/config.yaml"
    ##############################################
    ports:
      - "${PANEL_APP_PORT_HTTP}:4000"
    environment:
      # NOTE(review): DB credentials are hardcoded; consider wiring them
      # to install-form fields like other apps in this repo — confirm.
      DATABASE_URL: "postgresql://llmproxy:dbpassword9090@db:5432/litellm"
      STORE_MODEL_IN_DB: "True"
      LITELLM_MASTER_KEY: ${LITELLM_MASTER_KEY}
    # Fix: wait for Postgres to be healthy (it has a healthcheck) instead of
    # merely started, avoiding a connect race at first boot.
    depends_on:
      db:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 http://localhost:4000/health/liveliness || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    networks:
      - 1panel-network
    labels:
      createdBy: "Apps"
  db:
    image: postgres:16
    restart: always
    container_name: ${CONTAINER_NAME}-db
    environment:
      POSTGRES_DB: litellm
      POSTGRES_USER: llmproxy
      POSTGRES_PASSWORD: dbpassword9090
    # Fix: removed the "5432:5432" host mapping. The proxy reaches the
    # database over the shared network ("db:5432"); publishing it exposed
    # a password-protected-only Postgres on the host and conflicted with
    # any existing Postgres on port 5432.
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -d litellm -U llmproxy"]
      interval: 1s
      timeout: 5s
      retries: 10
    networks:
      - 1panel-network
    labels:
      createdBy: "Apps"
  prometheus:
    image: prom/prometheus
    container_name: ${CONTAINER_NAME}-prometheus
    volumes:
      - prometheus_data:/prometheus
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
    # NOTE(review): hardcoded host port will conflict if 9090 is taken;
    # consider a form field like PANEL_APP_PORT_HTTP — confirm.
    ports:
      - "9090:9090"
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
      - "--storage.tsdb.path=/prometheus"
      - "--storage.tsdb.retention.time=15d"
    restart: always
    networks:
      - 1panel-network
    labels:
      createdBy: "Apps"
volumes:
  prometheus_data:
    driver: local
  postgres_data:
    name: litellm_postgres_data
networks:
  1panel-network:
    external: true

View File

@@ -0,0 +1,7 @@
# Prometheus scrape configuration for the bundled LiteLLM proxy.
global:
  scrape_interval: 15s
scrape_configs:
  - job_name: "litellm"
    static_configs:
      # Service name resolves via Docker DNS on the shared network.
      - targets: ["litellm:4000"]

BIN
apps/litellm/logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

View File

@@ -0,0 +1,21 @@
# Install-form fields for this LiteLLM version.
additionalProperties:
  formFields:
    # Host port for the proxy (container listens on 4000).
    - default: "4000"
      envKey: PANEL_APP_PORT_HTTP
      label:
        en: Port
        zh: 端口
      required: true
      type: number
      edit: true
      rule: paramPort
    # Master/admin key for the proxy; 1Panel randomizes the value at install.
    - default: sk-litellm-change-in-production
      envKey: LITELLM_MASTER_KEY
      label:
        en: LITELLM_MASTER_KEY
        zh: LITELLM密钥
      required: true
      type: password
      edit: true
      rule: paramComplexity
      random: true

View File

@@ -0,0 +1,79 @@
services:
  litellm:
    image: ghcr.io/berriai/litellm:v1.83.0-nightly
    container_name: ${CONTAINER_NAME}
    # Fix: was missing; db and prometheus already restart automatically.
    restart: always
    #########################################
    ## Uncomment these lines to start proxy with a config.yaml file ##
    # volumes:
    #   - ./config.yaml:/app/config.yaml
    # command:
    #   - "--config=/app/config.yaml"
    ##############################################
    ports:
      - "${PANEL_APP_PORT_HTTP}:4000"
    environment:
      # NOTE(review): DB credentials are hardcoded; consider wiring them
      # to install-form fields like other apps in this repo — confirm.
      DATABASE_URL: "postgresql://llmproxy:dbpassword9090@db:5432/litellm"
      STORE_MODEL_IN_DB: "True"
      LITELLM_MASTER_KEY: ${LITELLM_MASTER_KEY}
    # Fix: wait for Postgres to be healthy (it has a healthcheck) instead of
    # merely started, avoiding a connect race at first boot.
    depends_on:
      db:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "wget --no-verbose --tries=1 http://localhost:4000/health/liveliness || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    networks:
      - 1panel-network
    labels:
      createdBy: "Apps"
  db:
    image: postgres:16
    restart: always
    container_name: ${CONTAINER_NAME}-db
    environment:
      POSTGRES_DB: litellm
      POSTGRES_USER: llmproxy
      POSTGRES_PASSWORD: dbpassword9090
    # Fix: removed the "5432:5432" host mapping. The proxy reaches the
    # database over the shared network ("db:5432"); publishing it exposed
    # Postgres on the host and conflicted with any existing Postgres on 5432.
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -d litellm -U llmproxy"]
      interval: 1s
      timeout: 5s
      retries: 10
    networks:
      - 1panel-network
    labels:
      createdBy: "Apps"
  prometheus:
    image: prom/prometheus
    container_name: ${CONTAINER_NAME}-prometheus
    volumes:
      - prometheus_data:/prometheus
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
    # NOTE(review): hardcoded host port will conflict if 9090 is taken;
    # consider a form field like PANEL_APP_PORT_HTTP — confirm.
    ports:
      - "9090:9090"
    command:
      - "--config.file=/etc/prometheus/prometheus.yml"
      - "--storage.tsdb.path=/prometheus"
      - "--storage.tsdb.retention.time=15d"
    restart: always
    networks:
      - 1panel-network
    labels:
      createdBy: "Apps"
volumes:
  prometheus_data:
    driver: local
  postgres_data:
    name: litellm_postgres_data
networks:
  1panel-network:
    external: true

View File

@@ -0,0 +1,7 @@
# Prometheus scrape configuration for the bundled LiteLLM proxy.
global:
  scrape_interval: 15s
scrape_configs:
  - job_name: "litellm"
    static_configs:
      # Service name resolves via Docker DNS on the shared network.
      - targets: ["litellm:4000"]

View File

View File

@@ -1,6 +1,6 @@
services:
n8n:
image: n8nio/n8n:2.15.0
image: n8nio/n8n:2.17.0
container_name: ${CONTAINER_NAME}
restart: always
user: 1000:1000

View File

@@ -1,67 +0,0 @@
services:
new-api:
image: calciumion/new-api:v0.11.9
container_name: ${CONTAINER_NAME}
restart: always
ports:
- ${PANEL_APP_PORT_HTTP}:3000
networks:
- 1panel-network
command: --log-dir /app/logs
volumes:
- ./data:/data
- ./logs:/app/logs
environment:
- SQL_DSN=root:${PANEL_DB_ROOT_PASSWORD}@tcp(${CONTAINER_NAME}-mysql:3306)/${PANEL_DB_NAME}
- TZ=Asia/Shanghai
- REDIS_CONN_STRING=redis://${CONTAINER_NAME}-redis
- ERROR_LOG_ENABLED=true
- BATCH_UPDATE_ENABLED=true
depends_on:
redis:
condition: service_started
mysql:
condition: service_healthy
healthcheck:
test:
- CMD-SHELL
- 'wget -q -O - http://localhost:3000/api/status | grep -o ''"success":\s*true''
| awk -F: ''{print $$2}'''
interval: 30s
timeout: 10s
retries: 3
labels:
createdBy: Apps
mysql:
image: mysql:8.2
container_name: ${CONTAINER_NAME}-mysql
restart: always
volumes:
- mysql_data:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=${PANEL_DB_ROOT_PASSWORD}
- MYSQL_DATABASE=${PANEL_DB_NAME}
networks:
- 1panel-network
healthcheck:
test:
- CMD
- mysqladmin
- ping
- -h
- localhost
interval: 5s
timeout: 5s
retries: 10
start_period: 30s
redis:
image: redis:latest
container_name: ${CONTAINER_NAME}-redis
restart: always
networks:
- 1panel-network
volumes:
mysql_data: null
networks:
1panel-network:
external: true

View File

@@ -1,6 +1,6 @@
services:
new-api:
image: calciumion/new-api:v0.11.7
image: calciumion/new-api:v0.12.3
container_name: ${CONTAINER_NAME}
restart: always
ports:

View File

@@ -1,6 +1,6 @@
services:
new-api:
image: calciumion/new-api:v0.11.8
image: calciumion/new-api:v0.12.5
container_name: ${CONTAINER_NAME}
restart: always
ports:

View File

@@ -1,6 +1,6 @@
services:
new-api:
image: calciumion/new-api:v0.11.6
image: calciumion/new-api:v0.12.6
container_name: ${CONTAINER_NAME}
restart: always
ports:

View File

@@ -1,6 +1,6 @@
services:
new-api:
image: calciumion/new-api:v0.11.5
image: calciumion/new-api:v0.12.8
container_name: ${CONTAINER_NAME}
restart: always
ports:

View File

@@ -0,0 +1,67 @@
services:
  new-api:
    image: calciumion/new-api:v0.12.9
    container_name: ${CONTAINER_NAME}
    restart: always
    ports:
      - "${PANEL_APP_PORT_HTTP}:3000"
    networks:
      - 1panel-network
    command: --log-dir /app/logs
    volumes:
      - ./data:/data
      - ./logs:/app/logs
    environment:
      - SQL_DSN=root:${PANEL_DB_ROOT_PASSWORD}@tcp(${CONTAINER_NAME}-mysql:3306)/${PANEL_DB_NAME}
      - TZ=Asia/Shanghai
      - REDIS_CONN_STRING=redis://${CONTAINER_NAME}-redis
      - ERROR_LOG_ENABLED=true
      - BATCH_UPDATE_ENABLED=true
    depends_on:
      redis:
        condition: service_started
      mysql:
        condition: service_healthy
    healthcheck:
      # Fix: the old pipeline ended in `| awk ...`, so the pipeline's exit
      # status was awk's — awk exits 0 even on empty input, making the
      # healthcheck always pass. grep -q's exit status now decides health.
      test:
        - CMD-SHELL
        - wget -q -O - http://localhost:3000/api/status | grep -q '"success":\s*true'
      interval: 30s
      timeout: 10s
      retries: 3
    labels:
      createdBy: Apps
  mysql:
    image: mysql:8.2
    container_name: ${CONTAINER_NAME}-mysql
    restart: always
    volumes:
      - mysql_data:/var/lib/mysql
    environment:
      - MYSQL_ROOT_PASSWORD=${PANEL_DB_ROOT_PASSWORD}
      - MYSQL_DATABASE=${PANEL_DB_NAME}
    networks:
      - 1panel-network
    healthcheck:
      test:
        - CMD
        - mysqladmin
        - ping
        - -h
        - localhost
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 30s
  redis:
    # NOTE(review): unpinned tag; repo convention pins versions — confirm.
    image: redis:latest
    container_name: ${CONTAINER_NAME}-redis
    restart: always
    networks:
      - 1panel-network
volumes:
  mysql_data:
    driver: local
networks:
  1panel-network:
    external: true

View File

@@ -1,6 +1,6 @@
services:
new-api:
image: calciumion/new-api:v0.11.9
image: calciumion/new-api:v0.12.9
container_name: ${CONTAINER_NAME}
restart: always
ports:

View File

@@ -1,6 +1,6 @@
services:
dashboard:
image: ghcr.io/nezhahq/nezha:v2.0.6
image: ghcr.io/nezhahq/nezha:v2.0.7
container_name: ${CONTAINER_NAME}
restart: always
volumes:

View File

@@ -1,6 +1,6 @@
services:
nocodb:
image: nocodb/nocodb:0.301.5
image: nocodb/nocodb:2026.04.0
container_name: ${CONTAINER_NAME}
restart: always
networks:

View File

@@ -1,7 +1,7 @@
services:
nzbget:
container_name: ${CONTAINER_NAME}
image: nzbgetcom/nzbget:v26.0
image: nzbgetcom/nzbget:v26.1
restart: always
networks:
- 1panel-network

View File

@@ -1,6 +1,6 @@
services:
prompt-optimizer:
image: linshen/prompt-optimizer:2.7.0
image: linshen/prompt-optimizer:2.9.3
container_name: ${CONTAINER_NAME}
restart: unless-stopped
ports:

Some files were not shown because too many files have changed in this diff Show More