feat(litellm): 添加v1.83.0-nightly版本并更新配置

更新latest版本的docker-compose.yml和data.yml配置,添加v1.83.0-nightly版本的配置文件
将LITELLM_MASTER_KEY字段类型改为password并更新默认值
This commit is contained in:
arch3rPro
2026-04-03 15:07:11 +08:00
parent 20ea51d3ec
commit 74b14d5feb
8 changed files with 35 additions and 35 deletions

View File

@@ -9,13 +9,13 @@ additionalProperties:
type: number type: number
edit: true edit: true
rule: paramPort rule: paramPort
- default: sk-1234 - default: sk-litellm-change-in-production
envKey: LITELLM_MASTER_KEY envKey: LITELLM_MASTER_KEY
label: label:
en: LITELLM_MASTER_KEY en: LITELLM_MASTER_KEY
zh: LITELLM密钥 zh: LITELLM密钥
required: true required: true
type: text type: password
edit: true edit: true
rule: paramCommon rule: paramComplexity
values: [] random: true

View File

View File

@@ -10,19 +10,19 @@ services:
# - "--config=/app/config.yaml" # - "--config=/app/config.yaml"
############################################## ##############################################
ports: ports:
- ${PANEL_APP_PORT_HTTP}:4000 # Map the container port to the host, change the host port if necessary - "${PANEL_APP_PORT_HTTP}:4000"
environment: environment:
DATABASE_URL: "postgresql://llmproxy:dbpassword9090@db:5432/litellm" DATABASE_URL: "postgresql://llmproxy:dbpassword9090@db:5432/litellm"
STORE_MODEL_IN_DB: "True" # allows adding models to proxy via UI STORE_MODEL_IN_DB: "True"
LITELLM_MASTER_KEY: ${LITELLM_MASTER_KEY} LITELLM_MASTER_KEY: ${LITELLM_MASTER_KEY}
depends_on: depends_on:
- db # Indicates that this service depends on the 'db' service, ensuring 'db' starts first - db
healthcheck: # Defines the health check configuration for the container healthcheck:
test: [ "CMD-SHELL", "wget --no-verbose --tries=1 http://localhost:4000/health/liveliness || exit 1" ] # Command to execute for health check test: [ "CMD-SHELL", "wget --no-verbose --tries=1 http://localhost:4000/health/liveliness || exit 1" ]
interval: 30s # Perform health check every 30 seconds interval: 30s
timeout: 10s # Health check command times out after 10 seconds timeout: 10s
retries: 3 # Retry up to 3 times if health check fails retries: 3
start_period: 40s # Wait 40 seconds after container start before beginning health checks start_period: 40s
networks: networks:
- 1panel-network - 1panel-network
@@ -39,7 +39,7 @@ services:
ports: ports:
- "5432:5432" - "5432:5432"
volumes: volumes:
- postgres_data:/var/lib/postgresql/data # Persists Postgres data across container restarts - postgres_data:/var/lib/postgresql/data
healthcheck: healthcheck:
test: ["CMD-SHELL", "pg_isready -d litellm -U llmproxy"] test: ["CMD-SHELL", "pg_isready -d litellm -U llmproxy"]
interval: 1s interval: 1s
@@ -72,8 +72,8 @@ volumes:
prometheus_data: prometheus_data:
driver: local driver: local
postgres_data: postgres_data:
name: litellm_postgres_data # Named volume for Postgres data persistence name: litellm_postgres_data
networks: networks:
1panel-network: 1panel-network:
external: true external: true

View File

@@ -4,4 +4,4 @@ global:
scrape_configs: scrape_configs:
- job_name: 'litellm' - job_name: 'litellm'
static_configs: static_configs:
- targets: ['litellm:4000'] # Assuming Litellm exposes metrics at port 4000 - targets: ['litellm:4000']

View File

@@ -9,13 +9,13 @@ additionalProperties:
type: number type: number
edit: true edit: true
rule: paramPort rule: paramPort
- default: sk-1234 - default: sk-litellm-change-in-production
envKey: LITELLM_MASTER_KEY envKey: LITELLM_MASTER_KEY
label: label:
en: LITELLM_MASTER_KEY en: LITELLM_MASTER_KEY
zh: LITELLM密钥 zh: LITELLM密钥
required: true required: true
type: text type: password
edit: true edit: true
rule: paramCommon rule: paramComplexity
values: [] random: true

View File

@@ -1,6 +1,6 @@
services: services:
litellm: litellm:
image: ghcr.io/berriai/litellm:main-v1.82.6-nightly image: ghcr.io/berriai/litellm:v1.83.0-nightly
container_name: ${CONTAINER_NAME} container_name: ${CONTAINER_NAME}
######################################### #########################################
## Uncomment these lines to start proxy with a config.yaml file ## ## Uncomment these lines to start proxy with a config.yaml file ##
@@ -10,19 +10,19 @@ services:
# - "--config=/app/config.yaml" # - "--config=/app/config.yaml"
############################################## ##############################################
ports: ports:
- ${PANEL_APP_PORT_HTTP}:4000 # Map the container port to the host, change the host port if necessary - "${PANEL_APP_PORT_HTTP}:4000"
environment: environment:
DATABASE_URL: "postgresql://llmproxy:dbpassword9090@db:5432/litellm" DATABASE_URL: "postgresql://llmproxy:dbpassword9090@db:5432/litellm"
STORE_MODEL_IN_DB: "True" # allows adding models to proxy via UI STORE_MODEL_IN_DB: "True"
LITELLM_MASTER_KEY: ${LITELLM_MASTER_KEY} LITELLM_MASTER_KEY: ${LITELLM_MASTER_KEY}
depends_on: depends_on:
- db # Indicates that this service depends on the 'db' service, ensuring 'db' starts first - db
healthcheck: # Defines the health check configuration for the container healthcheck:
test: [ "CMD-SHELL", "wget --no-verbose --tries=1 http://localhost:4000/health/liveliness || exit 1" ] # Command to execute for health check test: [ "CMD-SHELL", "wget --no-verbose --tries=1 http://localhost:4000/health/liveliness || exit 1" ]
interval: 30s # Perform health check every 30 seconds interval: 30s
timeout: 10s # Health check command times out after 10 seconds timeout: 10s
retries: 3 # Retry up to 3 times if health check fails retries: 3
start_period: 40s # Wait 40 seconds after container start before beginning health checks start_period: 40s
networks: networks:
- 1panel-network - 1panel-network
@@ -39,7 +39,7 @@ services:
ports: ports:
- "5432:5432" - "5432:5432"
volumes: volumes:
- postgres_data:/var/lib/postgresql/data # Persists Postgres data across container restarts - postgres_data:/var/lib/postgresql/data
healthcheck: healthcheck:
test: ["CMD-SHELL", "pg_isready -d litellm -U llmproxy"] test: ["CMD-SHELL", "pg_isready -d litellm -U llmproxy"]
interval: 1s interval: 1s
@@ -72,8 +72,8 @@ volumes:
prometheus_data: prometheus_data:
driver: local driver: local
postgres_data: postgres_data:
name: litellm_postgres_data # Named volume for Postgres data persistence name: litellm_postgres_data
networks: networks:
1panel-network: 1panel-network:
external: true external: true

View File

@@ -4,4 +4,4 @@ global:
scrape_configs: scrape_configs:
- job_name: 'litellm' - job_name: 'litellm'
static_configs: static_configs:
- targets: ['litellm:4000'] # Assuming Litellm exposes metrics at port 4000 - targets: ['litellm:4000']