Remake Repository
28
.github/workflows/llm-code-review.yml
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
# Runs an LLM-based review on every pull request and posts comments back.
name: LLM Code Review

permissions:
  contents: read
  pull-requests: write

on:
  pull_request:
    types: [opened, reopened, synchronize]

jobs:
  llm-code-review:
    runs-on: ubuntu-latest
    steps:
      - uses: fit2cloud/LLM-CodeReview-Action@main
        env:
          # Token used to post review comments on the PR.
          GITHUB_TOKEN: ${{ secrets.FIT2CLOUDRD_LLM_CODE_REVIEW_TOKEN }}
          # OpenAI-compatible credentials/endpoint (Aliyun DashScope).
          OPENAI_API_KEY: ${{ secrets.ALIYUN_LLM_API_KEY }}
          LANGUAGE: English
          OPENAI_API_ENDPOINT: https://dashscope.aliyuncs.com/compatible-mode/v1
          MODEL: qwen2.5-coder-3b-instruct
          PROMPT: "Please check the following code differences for any irregularities, potential issues, or optimization suggestions, and provide your answers in English."
          top_p: 1
          temperature: 1
          # max_tokens: 10000
          # Skip diffs larger than this many characters.
          MAX_PATCH_LENGTH: 10000
          IGNORE_PATTERNS: "/node_modules,*.md,/dist,/.github"
          FILE_PATTERNS: "*.java,*.go,*.py,*.vue,*.ts,*.js,*.css,*.scss,*.html,*.yml,*.yaml"
|
||||
26
.github/workflows/renovate-app-version.sh
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
#!/bin/bash
# Renames apps/<app_name>/<old_version> to the version referenced in that
# app's docker-compose.yml: the tag of the first service's image, with a
# leading "v" trimmed. Used by the renovate-app-version workflow.
#
# Usage: renovate-app-version.sh <app_name> <old_version>
# Requires: yq

app_name=$1
old_version=$2

# find all docker-compose files under apps/$app_name (there should be only one)
docker_compose_files=$(find "apps/$app_name/$old_version" -name docker-compose.yml)

for docker_compose_file in $docker_compose_files
do
    # Assuming that the app version will be from the first docker image
    first_service=$(yq '.services | keys | .[0]' "$docker_compose_file")

    image=$(yq ".services.$first_service.image" "$docker_compose_file")

    # Only apply changes if the format is <image>:<version>
    if [[ "$image" == *":"* ]]; then
        version=$(cut -d ":" -f2- <<< "$image")

        # Trim the "v" prefix
        trimmed_version=${version#v}

        # Skip the rename when the version is unchanged; otherwise a second
        # compose file (or a no-op update) would make mv fail on a missing dir.
        if [[ -n "$trimmed_version" && "$trimmed_version" != "$old_version" ]]; then
            mv "apps/$app_name/$old_version" "apps/$app_name/$trimmed_version"
        fi
    fi
done
|
||||
53
.github/workflows/renovate-app-version.yml
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
# When Renovate pushes an image bump to a renovate/* branch, rename the app's
# version directory to match the new image tag and push the rename back.
name: Update app version in Renovate Branches

on:
  push:
    branches: [ 'renovate/*' ]
  workflow_dispatch:
    inputs:
      manual-trigger:
        description: 'Manually trigger Renovate'
        default: ''

jobs:
  update-app-version:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
        with:
          # Full history so git diff-tree on the pushed commit works.
          fetch-depth: 0

      - name: Configure repo
        run: |
          git config --local user.email "githubaction@githubaction.com"
          git config --local user.name "github-action update-app-version"

      - name: Get list of updated files by the last commit in this branch separated by space
        id: updated-files
        run: |
          # ::set-output is deprecated and disabled on current runners;
          # write step outputs to $GITHUB_OUTPUT instead.
          echo "files=$(git diff-tree --no-commit-id --name-only -r ${{ github.sha }} | tr '\n' ' ')" >> "$GITHUB_OUTPUT"

      - name: Run renovate-app-version.sh on updated files
        run: |
          IFS=' ' read -ra files <<< "${{ steps.updated-files.outputs.files }}"

          # Paths look like apps/<app_name>/<old_version>/docker-compose.yml
          for file in "${files[@]}"; do
            if [[ $file == *"docker-compose.yml"* ]]; then
              app_name=$(echo $file | cut -d'/' -f 2)
              old_version=$(echo $file | cut -d'/' -f 3)
              chmod +x .github/workflows/renovate-app-version.sh
              .github/workflows/renovate-app-version.sh $app_name $old_version
            fi
          done

      - name: Commit & Push Changes
        run: |
          IFS=' ' read -ra files <<< "${{ steps.updated-files.outputs.files }}"

          for file in "${files[@]}"; do
            if [[ $file == *"docker-compose.yml"* ]]; then
              app_name=$(echo $file | cut -d'/' -f 2)
              # "|| true" keeps the loop going when there is nothing to commit.
              git add "apps/$app_name/*" && git commit -m "Update app version [skip ci]" --no-verify && git push || true
            fi
          done
||||
|
||||
22
.github/workflows/renovate.yml
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
# Nightly Renovate run over this repository (also triggerable by hand).
name: Renovate

on:
  schedule:
    - cron: "0 0 * * *"
  workflow_dispatch:
    inputs:
      manual-trigger:
        description: 'Manually trigger Renovate'
        default: ''

jobs:
  renovate:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Run Renovate
        uses: renovatebot/github-action@d385c88822a237acaead89c462fa0aef7502748f # v41.0.11
        with:
          # Use the full image so all package managers are available.
          useSlim: false
          token: ${{ secrets.GITHUBTOKEN }}
|
||||
8
.gitignore
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
# OSX
|
||||
.DS_Store
|
||||
|
||||
# IntelliJ
|
||||
.idea
|
||||
|
||||
# VSCode
|
||||
.vscode
|
||||
BIN
1Panel-Appstore.png
Normal file
|
After Width: | Height: | Size: 840 KiB |
320
ADD_APP.md
Normal file
@@ -0,0 +1,320 @@
|
||||
# 如何提交自己想要的应用
|
||||
|
||||
## 4. 创建应用文件 (以 Halo 为例)
|
||||
|
||||
|
||||
|
||||
v1.3 及以上版本可以在 1Panel 宿主机使用 1panel app init <应用的key> <应用的版本> 来快速初始化应用文件 (注意不是 1pctl 命令)
|
||||
|
||||
文件夹格式
|
||||
|
||||
```
|
||||
├──halo // 以 halo 的 key 命名 ,下面解释什么是 key
|
||||
├── logo.png // 应用 logo , 最好是 180 * 180 px
|
||||
├── data.yml // 应用声明文件
|
||||
├── README.md // 应用的 README
|
||||
├── 2.2.0 // 应用版本 注意不要以 v 开头
|
||||
│ ├── data.yml // 应用的参数配置,下面有详细介绍
|
||||
│ ├── data // 挂载出来的目录
|
||||
| ├── scripts // 脚本目录 存放 init.sh upgrade.sh uninstall.sh
|
||||
│ └── docker-compose.yml // docker-compose 文件
|
||||
└── 2.3.2
|
||||
├── data.yml
|
||||
├── data
|
||||
└── docker-compose.yml
|
||||
```
|
||||
|
||||
|
||||
|
||||
应用声明文件 data.yml
|
||||
|
||||
> 本文件主要用于声明应用的一些信息
|
||||
|
||||
```
|
||||
additionalProperties: #固定参数
|
||||
key: halo #应用的 key ,仅限英文,用于在 Linux 创建文件夹
|
||||
name: Halo #应用名称
|
||||
tags:
|
||||
- WebSite #应用标签,可以有多个,请参照下方的标签列表
|
||||
shortDescZh: 强大易用的开源建站工具 #应用中文描述,不要超过30个字
|
||||
shortDescEn: Powerful and easy-to-use open source website builder #应用英文描述
|
||||
type: website #应用类型,区别于应用分类,只能有一个,请参照下方的类型列表
|
||||
crossVersionUpdate: true #是否可以跨大版本升级
|
||||
limit: 0 #应用安装数量限制,0 代表无限制
|
||||
website: https://halo.run/ #官网地址
|
||||
github: https://github.com/halo-dev/halo #github 地址
|
||||
document: https://docs.halo.run/ #文档地址
|
||||
```
|
||||
|
||||
|
||||
|
||||
应用标签 - tags 字段(持续更新。。。)
|
||||
|
||||
| key | name |
|
||||
| -------- | ---------- |
|
||||
| WebSite | 建站 |
|
||||
| Server | Web 服务器 |
|
||||
| Runtime | 运行环境 |
|
||||
| Database | 数据库 |
|
||||
| Tool | 工具 |
|
||||
| CI/CD | CI/CD |
|
||||
| Local | 本地 |
|
||||
|
||||
应用类型 - type 字段
|
||||
|
||||
| type | 说明 |
|
||||
| ------- | ------------------------------------------------------------ |
|
||||
| website | website 类型在 1Panel 中支持在网站中一键部署,wordpress halo 都是此 type |
|
||||
| runtime | mysql openresty redis 等类型的应用 |
|
||||
| tool | phpMyAdmin redis-commander jenkins 等类型的应用 |
|
||||
|
||||
应用参数配置文件 data.yml (注意区分于应用主目录下面的 data.yaml)
|
||||
|
||||
> 本文件主要用于生成安装时要填写的 form 表单,在应用版本文件夹下面 可以无表单,但是需要有这个 data.yml文件,并且包含 formFields 字段
|
||||
|
||||
以安装 halo 时的 form 表单 为例
|
||||
|
||||

|
||||
|
||||
如果要生成上面的表单,需要这么填写 data.yml
|
||||
|
||||
```
|
||||
additionalProperties: #固定参数
|
||||
formFields:
|
||||
- default: ""
|
||||
envKey: PANEL_DB_HOST #docker-compose 文件中的参数
|
||||
key: mysql #依赖应用的 key , 例如 mysql
|
||||
labelEn: Database Service #英文的label
|
||||
labelZh: 数据库服务 #中文的label
|
||||
required: true #是否必填
|
||||
type: service #如果需要依赖其他应用,例如数据库,使用此 type
|
||||
- default: halo
|
||||
envKey: PANEL_DB_NAME
|
||||
labelEn: Database
|
||||
labelZh: 数据库名
|
||||
random: true #是否在 default 文字后面,增加随机字符串
|
||||
required: true
|
||||
rule: paramCommon #校验规则
|
||||
type: text #需要手动填写的,使用此 type
|
||||
- default: halo
|
||||
envKey: PANEL_DB_USER
|
||||
labelEn: User
|
||||
labelZh: 数据库用户
|
||||
random: true
|
||||
required: true
|
||||
rule: paramCommon
|
||||
type: text
|
||||
- default: halo
|
||||
envKey: PANEL_DB_USER_PASSWORD
|
||||
labelEn: Password
|
||||
labelZh: 数据库用户密码
|
||||
random: true
|
||||
required: true
|
||||
rule: paramComplexity
|
||||
type: password #密码字段使用此 type
|
||||
- default: admin
|
||||
envKey: HALO_ADMIN
|
||||
labelEn: Admin Username
|
||||
labelZh: 超级管理员用户名
|
||||
required: true
|
||||
rule: paramCommon
|
||||
type: text
|
||||
- default: halo
|
||||
envKey: HALO_ADMIN_PASSWORD
|
||||
labelEn: Admin Password
|
||||
labelZh: 超级管理员密码
|
||||
random: true
|
||||
required: true
|
||||
rule: paramComplexity
|
||||
type: password
|
||||
- default: http://localhost:8080
|
||||
edit: true
|
||||
envKey: HALO_EXTERNAL_URL
|
||||
labelEn: External URL
|
||||
labelZh: 外部访问地址
|
||||
required: true
|
||||
rule: paramExtUrl
|
||||
type: text
|
||||
- default: 8080
|
||||
edit: true
|
||||
envKey: PANEL_APP_PORT_HTTP
|
||||
labelEn: Port
|
||||
labelZh: 端口
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number #端口使用此 type
|
||||
```
|
||||
|
||||
|
||||
|
||||
关于端口字段:
|
||||
|
||||
1. PANEL_APP_PORT_HTTP 有 web 访问端口的优先使用此 envKey
|
||||
2. envKey 中包含 PANEL_APP_PORT 前缀会被认定为端口类型,并且用于安装前的端口占用校验。注意:端口需要是外部端口
|
||||
|
||||
关于 type 字段:
|
||||
|
||||
| type | 说明 |
|
||||
| -------- | ------------------------------------------------------------ |
|
||||
| service | `type: service` 如果该应用需要依赖其他组件,如 mysql redis 等,可以通过 `key: mysql` 定义依赖的名称,在创建应用时会要求先创建依赖的应用。 |
|
||||
| password | `type: password` 敏感信息,如密码相关的字段会默认不显示明文。 |
|
||||
| text | `type: text` 一般内容,比如数据库名称,默认明文显示。 |
|
||||
| number | `type: number` 一般用在端口相关的配置上,只允许输入数字。 |
|
||||
| select | `type: select` 选项,比如 `true`, `false`,日志等级等。 |
|
||||
|
||||
简单的例子
|
||||
|
||||
```
|
||||
# type: service,定义一个 mysql 的 service 依赖。
|
||||
- default: ""
|
||||
envKey: DB_HOST
|
||||
key: mysql
|
||||
labelEn: Database Service
|
||||
labelZh: 数据库服务
|
||||
required: true
|
||||
type: service
|
||||
|
||||
# type: password
|
||||
- default: Np2qgqtiUayA857GpuVI0Wtg
|
||||
edit: true
|
||||
envKey: DB_PASSWORD
|
||||
labelEn: Database password
|
||||
labelZh: 数据库密码
|
||||
required: true
|
||||
type: password
|
||||
|
||||
# type: text
|
||||
- default: 192.168.100.100
|
||||
disabled: true
|
||||
envKey: REDIS_HOST
|
||||
labelEn: Redis host
|
||||
labelZh: Redis 主机
|
||||
type: text
|
||||
|
||||
# type: number
|
||||
- default: 3306
|
||||
disabled: true
|
||||
envKey: DB_PORT
|
||||
labelEn: Database port
|
||||
labelZh: 数据库端口
|
||||
rule: paramPort
|
||||
type: number
|
||||
|
||||
# type: select
|
||||
- default: "ERROR"
|
||||
envKey: LOG_LEVEL
|
||||
labelEn: Log level
|
||||
labelZh: 日志级别
|
||||
required: true
|
||||
type: select
|
||||
values:
|
||||
- label: DEBUG
|
||||
value: "DEBUG"
|
||||
- label: INFO
|
||||
value: "INFO"
|
||||
- label: WARNING
|
||||
value: "WARNING"
|
||||
- label: ERROR
|
||||
value: "ERROR"
|
||||
- label: CRITICAL
|
||||
value: "CRITICAL"
|
||||
```
|
||||
|
||||
|
||||
|
||||
rule 字段目前支持的几种校验
|
||||
|
||||
| rule | 规则 |
|
||||
| --------------- | ------------------------------------------------------ |
|
||||
| paramPort | 用于限制端口范围为 1-65535 |
|
||||
| paramExtUrl | 格式为 http(s)://(域名/ip):(端口) |
|
||||
| paramCommon | 英文、数字、.-和_,长度2-30 |
|
||||
| paramComplexity | 支持英文、数字、.%@$!&~_-,长度6-30,特殊字符不能在首尾 |
|
||||
|
||||
应用 docker-compose.yml 文件
|
||||
|
||||
> ${PANEL_APP_PORT_HTTP} 类型的参数,都在 data.yml 中有声明
|
||||
|
||||
```
|
||||
version: "3"
|
||||
services:
|
||||
halo:
|
||||
image: halohub/halo:2.2.0
|
||||
container_name: ${CONTAINER_NAME} // 固定写法,勿改
|
||||
restart: always
|
||||
networks:
|
||||
- 1panel-network // 1Panel 创建的应用都在此网络下
|
||||
volumes:
|
||||
- ./data:/root/.halo2
|
||||
ports:
|
||||
- ${PANEL_APP_PORT_HTTP}:8090
|
||||
command:
|
||||
- --spring.r2dbc.url=r2dbc:pool:${HALO_PLATFORM}://${PANEL_DB_HOST}:${HALO_DB_PORT}/${PANEL_DB_NAME}
|
||||
- --spring.r2dbc.username=${PANEL_DB_USER}
|
||||
- --spring.r2dbc.password=${PANEL_DB_USER_PASSWORD}
|
||||
- --spring.sql.init.platform=${HALO_PLATFORM}
|
||||
- --halo.external-url=${HALO_EXTERNAL_URL}
|
||||
- --halo.security.initializer.superadminusername=${HALO_ADMIN}
|
||||
- --halo.security.initializer.superadminpassword=${HALO_ADMIN_PASSWORD}
|
||||
labels:
|
||||
createdBy: "Apps"
|
||||
|
||||
networks:
|
||||
1panel-network:
|
||||
external: true
|
||||
```
|
||||
|
||||
|
||||
|
||||
## 5. 脚本
|
||||
|
||||
|
||||
|
||||
1Panel 在 安装之前、升级之前、卸载之后支持执行 .sh 脚本
|
||||
分别对应 init.sh upgrade.sh uninstall.sh
|
||||
存放目录(以halo为例) : halo/2.2.0/scripts
|
||||
|
||||
## 6. 本地测试
|
||||
|
||||
|
||||
|
||||
将应用目录上传到 1Panel 的 /opt/1panel/resource/apps/local 文件夹下
|
||||
注意:/opt 为 1Panel 默认安装目录,请根据自己的实际情况修改
|
||||
上传完成后,目录结构如下
|
||||
|
||||
```
|
||||
├──halo
|
||||
├── logo.png
|
||||
├── data.yml
|
||||
├── README.md
|
||||
├── 2.2.0
|
||||
├── data.yml
|
||||
├── data
|
||||
└── docker-compose.yml
|
||||
```
|
||||
|
||||
|
||||
|
||||
在 1Panel 应用商店中,点击更新应用列表按钮同步本地应用
|
||||
|
||||
> v1.2 版本及之前版本的本地应用,请参考[这个文档](https://github.com/1Panel-dev/appstore/wiki/v1.2-版本本地应用升级指南)修改
|
||||
|
||||
## 7. 提交文件
|
||||
|
||||
|
||||
|
||||
```
|
||||
git add .
|
||||
git commit -m "Add my-app"
|
||||
git push origin dev
|
||||
```
|
||||
|
||||
|
||||
|
||||
## 8. 提交 Pull Request
|
||||
|
||||
|
||||
|
||||
- 在你的仓库点击 Pull requests 菜单
|
||||
- 点击 New pull request ,填写标题和描述
|
||||
- 选择由你的分支提交到 1Panel-dev/appstore
|
||||
BIN
LLM_Free-API.png
Normal file
|
After Width: | Height: | Size: 235 KiB |
697
README.md
Normal file
@@ -0,0 +1,697 @@
|
||||
<p align="center">
|
||||
<img src="https://raw.githubusercontent.com/arch3rPro/1Panel-Appstore/dev/1Panel-Appstore.png" >
|
||||
</p>
|
||||
<h1 align="center">1Panel AppStore </h1>
|
||||
<br>
|
||||
<p align="center">
|
||||
<img src="https://img.shields.io/badge/Author-Arch3rPro-blueviolet.svg">
|
||||
<img src="https://img.shields.io/badge/Release-v1.0-blue.svg" />
|
||||
<img src="https://img.shields.io/badge/Platform-Docker-red.svg" />
|
||||
<img src="https://img.shields.io/badge/Awesome-List-9cf.svg">
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="http://www.secnews.xyz/">作者博客</a> |
|
||||
<a href="http://www.secnews.xyz/document">文档教程</a> |
|
||||
<a href="http://nav.secnews.xyz/">安全导航</a> |
|
||||
<a href="http://nav.secnews.xyz/wechat.html">微信公众号导航</a>
|
||||
</p>
|
||||
|
||||
### 📖 仓库介绍
|
||||
|
||||
本仓库包含多个适用于 1Panel 的应用,旨在为用户提供简单、快速的安装与更新体验。应用均为开源项目,支持通过 1Panel 的计划任务功能自动化安装和更新。通过仓库提供的脚本,可以轻松地将应用集成到 1Panel 系统中。
|
||||
|
||||
### ⚠️ 仓库申明
|
||||
|
||||
- 非官方,第三方应用商店
|
||||
- 部分应用源于[okxlin/appstore](https://github.com/okxlin/appstore),本仓库做了更新适配
|
||||
- 不对任何原始镜像的有效性做出任何明示或暗示的保证或声明,安全性和风险自查
|
||||
|
||||
### 📱 应用列表
|
||||
|
||||
以下是当前在本仓库中提供的应用列表及其版本信息,**点击应用名称可查看应用详细介绍文档**
|
||||
|
||||
#### 🤖LLM免费API接口
|
||||
|
||||
支持一键部署AI免费API接口,使用方式请参考应用内**README介绍**
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/deepseek-free-api/README.md">
|
||||
<img src="./apps/deepseek-free-api/logo.png" width="60" height="60" alt="DeepSeek-Free-API">
|
||||
<br><b>DeepSeek-Free-API</b>
|
||||
</a>
|
||||
|
||||
🚀 DeepSeek-V3 & R1大模型逆向API
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/LLM-Red-Team/deepseek-free-api)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/kimi-free-api/README.md">
|
||||
<img src="./apps/kimi-free-api/logo.png" width="60" height="60" alt="Kimi-Free-API">
|
||||
<br><b>Kimi-Free-API</b>
|
||||
</a>
|
||||
|
||||
🚀 KIMI AI 长文本大模型逆向API
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/LLM-Red-Team/kimi-free-api)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/qwen-free-api/README.md">
|
||||
<img src="./apps/qwen-free-api/logo.png" width="60" height="60" alt="Qwen-Free-API">
|
||||
<br><b>Qwen-Free-API</b>
|
||||
</a>
|
||||
|
||||
🚀 阿里通义千问2.5大模型逆向API
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/LLM-Red-Team/qwen-free-api)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/glm-free-api/README.md">
|
||||
<img src="./apps/glm-free-api/logo.png" width="60" height="60" alt="GLM-Free-API">
|
||||
<br><b>GLM-Free-API</b>
|
||||
</a>
|
||||
|
||||
🚀 智谱清言ChatGLM-4-Plus大模型逆向API
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/LLM-Red-Team/glm-free-api)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/jimeng-free-api/README.md">
|
||||
<img src="./apps/jimeng-free-api/logo.png" width="60" height="60" alt="Jimeng-Free-API">
|
||||
<br><b>Jimeng-Free-API</b>
|
||||
</a>
|
||||
|
||||
🚀 即梦3.0逆向API【特长:图像生成顶流】
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/LLM-Red-Team/jimeng-free-api)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/spark-free-api/README.md">
|
||||
<img src="./apps/spark-free-api/logo.png" width="60" height="60" alt="Spark-Free-API">
|
||||
<br><b>Spark-Free-API</b>
|
||||
</a>
|
||||
|
||||
🚀 讯飞星火大模型逆向API【特长:办公助手】
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/LLM-Red-Team/spark-free-api)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/minimax-free-api/README.md">
|
||||
<img src="./apps/minimax-free-api/logo.png" width="60" height="60" alt="Minimax-Free-API">
|
||||
<br><b>Minimax-Free-API</b>
|
||||
</a>
|
||||
|
||||
🚀 MiniMax大模型海螺AI逆向API【特长:超自然语音】
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/LLM-Red-Team/minimax-free-api)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/step-free-api/README.md">
|
||||
<img src="./apps/step-free-api/logo.png" width="60" height="60" alt="Step-Free-API">
|
||||
<br><b>Step-Free-API</b>
|
||||
</a>
|
||||
|
||||
🚀 阶跃星辰跃问Step 多模态大模型逆向API【特长:超强多模态】
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/LLM-Red-Team/step-free-api)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/metaso-free-api/README.md">
|
||||
<img src="./apps/metaso-free-api/logo.png" width="60" height="60" alt="Metaso-Free-API">
|
||||
<br><b>Metaso-Free-API</b>
|
||||
</a>
|
||||
|
||||
🚀 秘塔AI搜索逆向API【特长:超强检索超长输出】
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/LLM-Red-Team/metaso-free-api)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
|
||||
|
||||
#### 📝 文档与内容管理
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/docmost/README.md">
|
||||
<img src="./apps/docmost/logo.png" width="60" height="60" alt="DocMost">
|
||||
<br><b>DocMost</b>
|
||||
</a>
|
||||
|
||||
轻量级文档管理系统,支持多人协作编辑与版本控制
|
||||
|
||||
<kbd>v0.20.4</kbd> • [官网链接](https://github.com/docmost/docmost)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/hexo/README.md">
|
||||
<img src="./apps/hexo/logo.png" width="60" height="60" alt="Hexo">
|
||||
<br><b>Hexo</b>
|
||||
</a>
|
||||
|
||||
快速静态博客框架,支持Markdown编写,丰富的插件和主题
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/hexojs/hexo)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/qexo/README.md">
|
||||
<img src="./apps/qexo/logo.png" width="60" height="60" alt="QEXO">
|
||||
<br><b>QEXO</b>
|
||||
</a>
|
||||
|
||||
美观强大的在线静态博客管理器,支持多种平台
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/Qexo/Qexo)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/md/README.md">
|
||||
<img src="./apps/md/logo.png" width="60" height="60" alt="MD">
|
||||
<br><b>MD</b>
|
||||
</a>
|
||||
|
||||
微信 Markdown 编辑器,自动即时渲染为微信图文
|
||||
|
||||
<kbd>v2.0.3</kbd> • [官网链接](https://github.com/doocs/md)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/karakeep/README.md">
|
||||
<img src="./apps/karakeep/logo.png" width="60" height="60" alt="Karakeep">
|
||||
<br><b>Karakeep</b>
|
||||
</a>
|
||||
|
||||
自托管全能书签管理工具,支持AI自动标签功能
|
||||
|
||||
<kbd>v0.24.1</kbd> • [官网链接](https://github.com/karakeep-app/karakeep)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/linkwarden/README.md">
|
||||
<img src="./apps/linkwarden/logo.png" width="60" height="60" alt="Linkwarden">
|
||||
<br><b>Linkwarden</b>
|
||||
</a>
|
||||
|
||||
自托管协作书签管理工具,支持网页归档和团队协作
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/linkwarden/linkwarden)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/affine/README.md">
|
||||
<img src="./apps/affine/logo.png" width="60" height="60" alt="AFFiNE">
|
||||
<br><b>AFFiNE</b>
|
||||
</a>
|
||||
|
||||
文档、白板和数据库完全整合的工作空间
|
||||
|
||||
<kbd>stable</kbd> • [官网链接](https://github.com/toeverything/AFFiNE)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/blinko/README.md">
|
||||
<img src="./apps/blinko/logo.png" width="60" height="60" alt="Blinko">
|
||||
<br><b>Blinko</b>
|
||||
</a>
|
||||
|
||||
开源自托管个人笔记工具,支持AI增强笔记检索
|
||||
|
||||
<kbd>v1.0.3</kbd> • [官网链接](https://github.com/blinko-space/blinko)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
#### 🔒 安全与网络工具
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/easytier/README.md">
|
||||
<img src="./apps/easytier/logo.png" width="60" height="60" alt="EasyTier">
|
||||
<br><b>EasyTier</b>
|
||||
</a>
|
||||
|
||||
🌐 简单安全去中心化的内网穿透 VPN 组网方案
|
||||
|
||||
<kbd>v2.3.1</kbd> • [官网链接](https://github.com/EasyTier/Easytier)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/npc/README.md">
|
||||
<img src="./apps/npc/logo.png" width="60" height="60" alt="NPC">
|
||||
<br><b>NPC</b>
|
||||
</a>
|
||||
|
||||
🔗 NPS客户端,轻量级高性能内网穿透代理工具
|
||||
|
||||
<kbd>v0.29.21</kbd> • [官网链接](https://github.com/djylb/nps)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/nps/README.md">
|
||||
<img src="./apps/nps/logo.png" width="60" height="60" alt="NPS">
|
||||
<br><b>NPS</b>
|
||||
</a>
|
||||
|
||||
🔄 轻量级高性能内网穿透代理服务器
|
||||
|
||||
<kbd>v0.29.21</kbd> • [官网链接](https://github.com/djylb/nps)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/safeline/README.md">
|
||||
<img src="./apps/safeline/logo.png" width="60" height="60" alt="SafeLine">
|
||||
<br><b>SafeLine</b>
|
||||
</a>
|
||||
|
||||
🛡️ 简单好用的Web应用防火墙(WAF)
|
||||
|
||||
<kbd>V7.6.2</kbd> • [官网链接](https://github.com/chaitin/SafeLine)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/netbox/README.md">
|
||||
<img src="./apps/netbox/logo.png" width="60" height="60" alt="NetBox">
|
||||
<br><b>NetBox</b>
|
||||
</a>
|
||||
|
||||
🏢 开源数据中心和网络资源管理平台
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://netbox.dev/)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/kali-linux/README.md">
|
||||
<img src="./apps/kali-linux/logo.png" width="60" height="60" alt="Kali Linux">
|
||||
<br><b>Kali Linux</b>
|
||||
</a>
|
||||
|
||||
🔍 专为渗透测试和安全审计设计的Linux发行版
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://www.kali.org/)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
#### 🤖 AI 与智能应用
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/lobe-chat-data/README.md">
|
||||
<img src="./apps/lobe-chat-data/logo.png" width="60" height="60" alt="LobeChat-Data">
|
||||
<br><b>LobeChat-Data</b>
|
||||
</a>
|
||||
|
||||
💬 开源现代设计的 ChatGPT/LLMs UI/框架
|
||||
|
||||
<kbd>1.73.0</kbd> • [官网链接](https://github.com/lobehub/lobe-chat)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/dify/README.md">
|
||||
<img src="./apps/dify/logo.png" width="60" height="60" alt="Dify">
|
||||
<br><b>Dify</b>
|
||||
</a>
|
||||
|
||||
🤖 开源LLM应用开发平台,支持AI工作流和RAG管道
|
||||
|
||||
<kbd>1.1.1</kbd> • [官网链接](https://github.com/langgenius/dify)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
#### 🎵 多媒体管理
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/xiaomusic/README.md">
|
||||
<img src="./apps/xiaomusic/logo.png" width="60" height="60" alt="XiaoMusic">
|
||||
<br><b>XiaoMusic</b>
|
||||
</a>
|
||||
|
||||
🎵 使用小爱/红米音箱播放音乐,支持yt-dlp下载
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/hanxi/xiaomusic)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/openlist/README.md">
|
||||
<img src="./apps/openlist/logo.png" width="60" height="60" alt="OpenList">
|
||||
<br><b>OpenList</b>
|
||||
</a>
|
||||
|
||||
📁 开源支持多存储的文件列表程序和私人网盘
|
||||
|
||||
<kbd>4.0.2</kbd> • [官网链接](https://github.com/AlistGo/alist)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/piclist/README.md">
|
||||
<img src="./apps/piclist/logo.png" width="60" height="60" alt="PicList">
|
||||
<br><b>PicList</b>
|
||||
</a>
|
||||
|
||||
🖼️ 高效云存储和图床平台管理工具
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/Kuingsmile/PicList)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/nzbget/README.md">
|
||||
<img src="./apps/nzbget/logo.png" width="60" height="60" alt="NZBGet">
|
||||
<br><b>NZBGet</b>
|
||||
</a>
|
||||
|
||||
📥 高性能Usenet下载工具,支持Web界面管理
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://nzbget.net/)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/transmission/README.md">
|
||||
<img src="./apps/transmission/logo.png" width="60" height="60" alt="Transmission">
|
||||
<br><b>Transmission</b>
|
||||
</a>
|
||||
|
||||
🌱 开源高性能BT/PT下载工具,支持Web界面
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://transmissionbt.com/)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
#### ⚡ 运维监控
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/nezha/README.md">
|
||||
<img src="./apps/nezha/logo.png" width="60" height="60" alt="Nezha">
|
||||
<br><b>Nezha</b>
|
||||
</a>
|
||||
|
||||
📊 开源轻量易用的服务器监控运维工具
|
||||
|
||||
<kbd>v1.12.4</kbd> • [官网链接](https://github.com/naiba/nezha/)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/glance-agent/README.md">
|
||||
<img src="./apps/glance-agent/logo.png" width="60" height="60" alt="Glance-Agent">
|
||||
<br><b>Glance-Agent</b>
|
||||
</a>
|
||||
|
||||
👁️ 轻量级可视化综合服务器监控方案
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/arch3rPro/Glance-Monitor)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/next-terminal/README.md">
|
||||
<img src="./apps/next-terminal/logo.png" width="60" height="60" alt="Next-Terminal">
|
||||
<br><b>Next-Terminal</b>
|
||||
</a>
|
||||
|
||||
🖥️ 简单好用安全的开源交互审计系统
|
||||
|
||||
<kbd>v2.4.10</kbd> • [官网链接](https://github.com/dushixiang/next-terminal)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/netdata/README.md">
|
||||
<img src="./apps/netdata/logo.png" width="60" height="60" alt="Netdata">
|
||||
<br><b>Netdata</b>
|
||||
</a>
|
||||
|
||||
📈 Linux性能实时监测工具,全方位性能监控
|
||||
|
||||
<kbd>v2.2.6</kbd> • [官网链接](https://github.com/netdata/netdata)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/remmina/README.md">
|
||||
<img src="./apps/remmina/logo.png" width="60" height="60" alt="Remmina">
|
||||
<br><b>Remmina</b>
|
||||
</a>
|
||||
|
||||
🖥️ 开源跨平台远程桌面客户端,支持多种协议
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://remmina.org/)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
#### 🗄️ Nas工具
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/resilio-sync/README.md">
|
||||
<img src="./apps/resilio-sync/logo.png" width="60" height="60" alt="Resilio-Sync">
|
||||
<br><b>Resilio-Sync</b>
|
||||
</a>
|
||||
|
||||
🔄 跨平台P2P文件同步和分享工具
|
||||
|
||||
<kbd>v3.0.3</kbd> • [官网链接](https://www.resilio.com/sync/)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/adguardhome-sync/README.md">
|
||||
<img src="./apps/adguardhome-sync/logo.png" width="60" height="60" alt="AdGuardHome-Sync">
|
||||
<br><b>AdGuardHome-Sync</b>
|
||||
</a>
|
||||
|
||||
🔄 多AdGuardHome实例间配置同步工具
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/bakito/adguardhome-sync)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/pairdrop/README.md">
|
||||
<img src="./apps/pairdrop/logo.png" width="60" height="60" alt="PairDrop">
|
||||
<br><b>PairDrop</b>
|
||||
</a>
|
||||
|
||||
📱 类AirDrop工具,支持网络文件、文本共享
|
||||
|
||||
<kbd>Latest</kbd> • [官网链接](https://github.com/schlagmichdoch/PairDrop)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
#### 🐳 容器管理
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/cup/README.md">
|
||||
<img src="./apps/cup/logo.png" width="60" height="60" alt="Cup">
|
||||
<br><b>Cup</b>
|
||||
</a>
|
||||
|
||||
🔄 支持Web和Cli方式检查容器镜像更新
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/sergi0g/cup/)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/docker-copilot/README.md">
|
||||
<img src="./apps/docker-copilot/logo.png" width="60" height="60" alt="Docker-Copilot">
|
||||
<br><b>Docker-Copilot</b>
|
||||
</a>
|
||||
|
||||
🚀 主打便捷的docker容器管理工具,一键更新容器
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/onlyLTY/dockerCopilot)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/wud/README.md">
|
||||
<img src="./apps/wud/logo.png" width="60" height="60" alt="Wud">
|
||||
<br><b>Wud</b>
|
||||
</a>
|
||||
|
||||
🔄 监控Docker基础镜像自动更新
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/getwud/wud)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/hubcmdui/README.md">
|
||||
<img src="./apps/hubcmdui/logo.png" width="60" height="60" alt="Hubcmd-UI">
|
||||
<br><b>Hubcmd-UI</b>
|
||||
</a>
|
||||
|
||||
⚡ Docker镜像加速命令查询获取、镜像搜索、配置教程文档展示UI面板
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/dqzboy/Docker-Proxy)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/diun/README.md">
|
||||
<img src="./apps/diun/logo.png" width="60" height="60" alt="Diun">
|
||||
<br><b>Diun</b>
|
||||
</a>
|
||||
|
||||
🔔 Docker镜像更新监控和通知工具
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://github.com/crazy-max/diun)
|
||||
|
||||
</td>
|
||||
<td width="33%" align="center">
|
||||
|
||||
<a href="./apps/arcane/README.md">
|
||||
<img src="./apps/arcane/logo.png" width="60" height="60" alt="Arcane">
|
||||
<br><b>Arcane</b>
|
||||
</a>
|
||||
|
||||
🧙♂️ 现代化开源Docker管理Web面板
|
||||
|
||||
<kbd>latest</kbd> • [官网链接](https://arcane.ofkm.dev/)
|
||||
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
### 🚀 使用方法
|
||||
|
||||
#### 📋 添加脚本到 1Panel 计划任务
|
||||
|
||||
1. 在 1Panel 控制面板中,进入"计划任务"页面。
|
||||
2. 点击"新增任务",选择任务类型为"Shell 脚本"。
|
||||
3. 在脚本框中粘贴以下代码:
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
|
||||
# 清理旧的临时目录
|
||||
rm -rf /tmp/appstore_merge
|
||||
|
||||
# 克隆 appstore-arch3rPro
|
||||
git clone --depth=1 https://ghfast.top/https://github.com/arch3rPro/appstore /tmp/appstore_merge/appstore-arch3rPro
|
||||
|
||||
# 复制 数据(完整复制)
|
||||
cp -rf /tmp/appstore_merge/appstore-arch3rPro/apps/* /opt/1panel/resource/apps/local/
|
||||
|
||||
# 清理临时目录
|
||||
rm -rf /tmp/appstore_merge
|
||||
echo "应用商店数据已更新"
|
||||
```
|
||||
22
apps/adguardhome-sync/README.md
Normal file
@@ -0,0 +1,22 @@
|
||||
# AdGuardHome-Sync
|
||||
|
||||
AdGuardHome-Sync 是一个用于在多个 AdGuardHome 实例之间同步配置的工具。
|
||||
|
||||
## 功能特性
|
||||
|
||||
- 支持多个 AdGuardHome 实例之间的配置同步
|
||||
- 提供 Web API 接口进行管理
|
||||
- 支持定时同步任务
|
||||
- 基于 LinuxServer.io 的 Docker 镜像
|
||||
|
||||
## 使用方法
|
||||
|
||||
1. 部署后访问 Web API 端口(默认 8080)
|
||||
2. 配置 AdGuardHome 实例的连接信息
|
||||
3. 设置同步规则和时间间隔
|
||||
4. 启动同步任务
|
||||
|
||||
## 相关链接
|
||||
|
||||
- [GitHub 项目](https://github.com/bakito/adguardhome-sync)
|
||||
- [LinuxServer.io 文档](https://docs.linuxserver.io/images/docker-adguardhome-sync/)
|
||||
22
apps/adguardhome-sync/README_en.md
Normal file
@@ -0,0 +1,22 @@
|
||||
# AdGuardHome-Sync
|
||||
|
||||
AdGuardHome-Sync is a tool for synchronizing configurations between multiple AdGuardHome instances.
|
||||
|
||||
## Features
|
||||
|
||||
- Synchronize configurations between multiple AdGuardHome instances
|
||||
- Web API interface for management
|
||||
- Scheduled synchronization tasks
|
||||
- Based on LinuxServer.io Docker image
|
||||
|
||||
## Usage
|
||||
|
||||
1. Access the Web API port (default 8080) after deployment
|
||||
2. Configure connection information for AdGuardHome instances
|
||||
3. Set synchronization rules and intervals
|
||||
4. Start synchronization tasks
|
||||
|
||||
## Links
|
||||
|
||||
- [GitHub Project](https://github.com/bakito/adguardhome-sync)
|
||||
- [LinuxServer.io Documentation](https://docs.linuxserver.io/images/docker-adguardhome-sync/)
|
||||
32
apps/adguardhome-sync/data.yml
Normal file
@@ -0,0 +1,32 @@
|
||||
name: AdGuardHome-Sync
|
||||
tags:
|
||||
- 安全
|
||||
title: AdGuardHome 配置同步工具
|
||||
description: AdGuardHome 配置同步工具,支持多个 AdGuardHome 实例之间的配置同步
|
||||
additionalProperties:
|
||||
key: adguardhome-sync
|
||||
name: AdGuardHome-Sync
|
||||
tags:
|
||||
- Security
|
||||
shortDescZh: AdGuardHome 配置同步工具
|
||||
shortDescEn: AdGuardHome configuration synchronization tool
|
||||
description:
|
||||
en: AdGuardHome configuration synchronization tool for syncing configurations between multiple AdGuardHome instances
|
||||
ja: 複数のAdGuardHomeインスタンス間で設定を同期するためのAdGuardHome設定同期ツール
|
||||
ms: Alat penyegerakan konfigurasi AdGuardHome untuk menyegerakkan konfigurasi antara pelbagai instans AdGuardHome
|
||||
pt-br: Ferramenta de sincronização de configuração do AdGuardHome para sincronizar configurações entre múltiplas instâncias do AdGuardHome
|
||||
ru: Инструмент синхронизации конфигурации AdGuardHome для синхронизации конфигураций между несколькими экземплярами AdGuardHome
|
||||
ko: 여러 AdGuardHome 인스턴스 간의 구성을 동기화하기 위한 AdGuardHome 구성 동기화 도구
|
||||
zh-Hant: AdGuardHome 配置同步工具,支援多個 AdGuardHome 實例之間的配置同步
|
||||
zh: AdGuardHome 配置同步工具,支持多个 AdGuardHome 实例之间的配置同步
|
||||
type: website
|
||||
crossVersionUpdate: true
|
||||
limit: 0
|
||||
recommend: 0
|
||||
website: https://github.com/bakito/adguardhome-sync
|
||||
github: https://github.com/bakito/adguardhome-sync
|
||||
document: https://docs.linuxserver.io/images/docker-adguardhome-sync/
|
||||
architectures:
|
||||
- amd64
|
||||
- arm64
|
||||
- arm/v7
|
||||
19
apps/adguardhome-sync/latest/data.yml
Normal file
@@ -0,0 +1,19 @@
|
||||
additionalProperties:
|
||||
formFields:
|
||||
- default: 8080
|
||||
edit: true
|
||||
envKey: PANEL_APP_PORT_HTTP
|
||||
labelEn: Web API Port
|
||||
labelZh: Web API 端口
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
label:
|
||||
en: Web API Port
|
||||
ja: Web API ポート
|
||||
ms: Port API Web
|
||||
pt-br: Porta da API Web
|
||||
ru: Порт Web API
|
||||
ko: Web API 포트
|
||||
zh-Hant: Web API 埠
|
||||
zh: Web API 端口
|
||||
20
apps/adguardhome-sync/latest/docker-compose.yml
Normal file
@@ -0,0 +1,20 @@
|
||||
services:
|
||||
adguardhome-sync:
|
||||
container_name: ${CONTAINER_NAME}
|
||||
restart: always
|
||||
networks:
|
||||
- 1panel-network
|
||||
ports:
|
||||
- "${PANEL_APP_PORT_HTTP}:8080"
|
||||
volumes:
|
||||
- ./config:/config
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
- TZ=Asia/Shanghai
|
||||
image: lscr.io/linuxserver/adguardhome-sync:latest
|
||||
labels:
|
||||
createdBy: "Apps"
|
||||
networks:
|
||||
1panel-network:
|
||||
external: true
|
||||
BIN
apps/adguardhome-sync/logo.png
Normal file
|
After Width: | Height: | Size: 2.7 KiB |
21
apps/affine/README.md
Normal file
@@ -0,0 +1,21 @@
|
||||
# AFFiNE
|
||||
|
||||
**AFFiNE** 是一个开源的一体化工作区和操作系统,适用于构建知识库的所有构建块以及更多内容——wiki、知识管理、演示和数字资产。它是 Notion 和 Miro 的更好替代品。
|
||||
|
||||
## 主要特征:
|
||||
|
||||
### 任何形式的块的真正画布,文档和白板现已完全合并。
|
||||
|
||||
- 许多编辑器应用都声称自己是生产力的画布,但 AFFiNE 是极少数允许您在无边画布上放置任何构建块的应用之一——富文本、便签、任何嵌入式网页、多视图数据库、链接页面、形状甚至幻灯片。我们拥有一切。
|
||||
|
||||
### 多模式 AI 合作伙伴随时准备投入任何工作
|
||||
|
||||
- 撰写专业的工作报告?将大纲变成富有表现力且易于展示的幻灯片?将文章总结为结构良好的思维导图?整理工作计划和待办事项?或者... 只需一个提示即可直接绘制和编写原型应用程序和网页?有了你,AFFiNE AI 可以将您的创造力推向想象的边缘。
|
||||
|
||||
### 本地优先、实时协作
|
||||
|
||||
- 我们喜欢本地优先的理念,即无论使用云端,您始终拥有磁盘上的数据。此外,AFFiNE 支持在 Web 和跨平台客户端上进行实时同步和协作。
|
||||
|
||||
### 自托管并塑造您自己的 AFFiNE
|
||||
|
||||
- 您可以自由地管理、自行托管、分叉和构建自己的 AFFiNE。插件社区和第三方模块即将推出。Blocksuite 上还有更多牵引力。查看那里了解如何自行托管 AFFiNE。
|
||||
16
apps/affine/README_en.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# AFFiNE
|
||||
**AFFiNE** is an open-source integrated workspace and operating system for building knowledge bases and more—wiki, knowledge management, presentations, and digital assets. It's a better alternative to Notion and Miro.
|
||||
|
||||
## Key Features:
|
||||
### True canvas for blocks of any form, documents and whiteboards now fully merged.
|
||||
- Many editor apps claim to be productivity canvases, but AFFiNE is one of the very few apps that allows you to place any building block on an infinite canvas—rich text, sticky notes, embedded webpages, multi-view databases, linked pages, shapes, and even slideshows. We have it all.
|
||||
|
||||
### Multi-mode AI partner ready for any task
|
||||
- Writing professional work reports? Turning outlines into expressive and easy-to-present slideshows? Summarizing articles into well-structured mind maps? Organizing work plans and to-dos? Or... drawing and writing prototype applications and webpages with just a prompt? With you, AFFiNE AI can push your creativity to the edge of imagination.
|
||||
|
||||
### Local-first, real-time collaboration
|
||||
- We love the local-first concept, meaning you always own your data on disk, regardless of cloud usage. Additionally, AFFiNE supports real-time synchronization and collaboration on the web and across platform clients.
|
||||
|
||||
### Self-host and shape your own AFFiNE
|
||||
- You're free to manage, self-host, fork, and build your own AFFiNE. Plugin communities and third-party modules are coming soon. Blocksuite has even more traction. Check it out to learn how to self-host AFFiNE.
|
||||
|
||||
33
apps/affine/data.yml
Normal file
@@ -0,0 +1,33 @@
|
||||
name: AFFiNE
|
||||
tags:
|
||||
- 实用工具
|
||||
title: 将文档、白板和数据库完全整合的工作空间
|
||||
description: 将文档、白板和数据库完全整合的工作空间
|
||||
additionalProperties:
|
||||
key: affine
|
||||
name: AFFiNE
|
||||
tags:
|
||||
- Tool
|
||||
shortDescZh: 将文档、白板和数据库完全整合的工作空间
|
||||
shortDescEn: A workspace with fully merged docs, whiteboards and databases
|
||||
type: website
|
||||
crossVersionUpdate: true
|
||||
limit: 0
|
||||
recommend: 0
|
||||
website: https://affine.pro
|
||||
github: https://github.com/toeverything/AFFiNE
|
||||
document: https://docs.affine.pro/docs/self-host-affine
|
||||
architectures:
|
||||
- amd64
|
||||
- arm64
|
||||
- arm/v7
|
||||
description:
|
||||
en: A workspace with fully merged docs, whiteboards and databases
|
||||
zh: 将文档、白板和数据库完全整合的工作空间
|
||||
zh-Hant: 將文檔、白板和數據庫完全整合的工作空間
|
||||
ja: 文書、ホワイトボード、データベースを完全に統合したワークスペース
|
||||
ms: Ruang kerja dengan dokumen, papan putih, dan pangkalan data yang sepenuhnya digabungkan
|
||||
pt-br: Um espaço de trabalho com documentos, quadros brancos e bancos de dados totalmente integrados
|
||||
ru: Рабочее пространство с полностью объединенными документами, досками и базами данных
|
||||
ko: 문서, 화이트보드 및 데이터베이스가 완전히 통합된 작업 공간
|
||||
memoryRequired: 1024
|
||||
BIN
apps/affine/logo.png
Normal file
|
After Width: | Height: | Size: 3.8 KiB |
70
apps/affine/stable/data.yml
Normal file
@@ -0,0 +1,70 @@
|
||||
additionalProperties:
|
||||
formFields:
|
||||
- default: 3010
|
||||
envKey: PANEL_APP_PORT_HTTP
|
||||
labelEn: Web Port
|
||||
labelZh: HTTP 端口
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
label:
|
||||
en: Web Port
|
||||
zh: HTTP 端口
|
||||
- default: affine
|
||||
envKey: DB_DATABASE
|
||||
labelEn: Database
|
||||
labelZh: 数据库名
|
||||
required: true
|
||||
rule: paramCommon
|
||||
type: text
|
||||
label:
|
||||
en: Database
|
||||
zh: 数据库名
|
||||
- default: affine
|
||||
envKey: DB_USERNAME
|
||||
labelEn: User
|
||||
labelZh: 数据库用户
|
||||
random: true
|
||||
required: true
|
||||
rule: paramCommon
|
||||
type: text
|
||||
label:
|
||||
en: User
|
||||
zh: 数据库用户
|
||||
- default: affine
|
||||
envKey: DB_PASSWORD
|
||||
labelEn: Password
|
||||
labelZh: 数据库用户密码
|
||||
random: true
|
||||
required: true
|
||||
type: password
|
||||
label:
|
||||
en: Password
|
||||
zh: 数据库用户密码
|
||||
- default: ~/.affine/self-host/storage
|
||||
envKey: UPLOAD_LOCATION
|
||||
labelEn: Upload Location
|
||||
labelZh: 上传目录
|
||||
required: true
|
||||
type: text
|
||||
label:
|
||||
en: Upload Location
|
||||
zh: 上传目录
|
||||
- default: ~/.affine/self-host/storage
|
||||
envKey: CONFIG_LOCATION
|
||||
labelEn: Config Location
|
||||
labelZh: 配置目录
|
||||
required: true
|
||||
type: text
|
||||
label:
|
||||
en: Config Location
|
||||
zh: 配置目录
|
||||
- default: ~/.affine/self-host/postgres/pgdata
|
||||
envKey: DB_DATA_LOCATION
|
||||
labelEn: Postgre Data Location
|
||||
labelZh: Postgre 数据目录
|
||||
required: true
|
||||
type: text
|
||||
label:
|
||||
en: Postgre Data Location
|
||||
zh: Postgre 数据目录
|
||||
89
apps/affine/stable/docker-compose.yml
Normal file
@@ -0,0 +1,89 @@
|
||||
services:
|
||||
affine:
|
||||
image: ghcr.io/toeverything/affine-graphql:stable
|
||||
container_name: ${CONTAINER_NAME}
|
||||
ports:
|
||||
- ${PANEL_APP_PORT_HTTP}:3010
|
||||
depends_on:
|
||||
redis:
|
||||
condition: service_healthy
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
affine_migration:
|
||||
condition: service_completed_successfully
|
||||
volumes:
|
||||
# custom configurations
|
||||
- ${UPLOAD_LOCATION}:/root/.affine/storage
|
||||
- ${CONFIG_LOCATION}:/root/.affine/config
|
||||
environment:
|
||||
- REDIS_SERVER_HOST=redis
|
||||
- DATABASE_URL=postgresql://${DB_USERNAME}:${DB_PASSWORD}@postgres:5432/${DB_DATABASE:-affine}
|
||||
- AFFINE_INDEXER_ENABLED=false
|
||||
networks:
|
||||
- 1panel-network
|
||||
restart: always
|
||||
labels:
|
||||
createdBy: Apps
|
||||
affine_migration:
|
||||
image: ghcr.io/toeverything/affine-graphql:stable
|
||||
container_name: ${CONTAINER_NAME}_migration_job
|
||||
volumes:
|
||||
# custom configurations
|
||||
- ${UPLOAD_LOCATION}:/root/.affine/storage
|
||||
- ${CONFIG_LOCATION}:/root/.affine/config
|
||||
command: ['sh', '-c', 'node ./scripts/self-host-predeploy.js']
|
||||
networks:
|
||||
- 1panel-network
|
||||
environment:
|
||||
- REDIS_SERVER_HOST=redis
|
||||
- DATABASE_URL=postgresql://${DB_USERNAME}:${DB_PASSWORD}@postgres:5432/${DB_DATABASE:-affine}
|
||||
- AFFINE_INDEXER_ENABLED=false
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_healthy
|
||||
labels:
|
||||
createdBy: Apps
|
||||
restart: no
|
||||
redis:
|
||||
image: redis
|
||||
container_name: ${CONTAINER_NAME}_redis
|
||||
healthcheck:
|
||||
test: ['CMD', 'redis-cli', '--raw', 'incr', 'ping']
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
- 1panel-network
|
||||
labels:
|
||||
createdBy: Apps
|
||||
restart: always
|
||||
|
||||
postgres:
|
||||
image: pgvector/pgvector:pg16
|
||||
container_name: ${CONTAINER_NAME}_postgres
|
||||
volumes:
|
||||
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
|
||||
networks:
|
||||
- 1panel-network
|
||||
labels:
|
||||
createdBy: Apps
|
||||
environment:
|
||||
POSTGRES_USER: ${DB_USERNAME}
|
||||
POSTGRES_PASSWORD: ${DB_PASSWORD}
|
||||
POSTGRES_DB: ${DB_DATABASE:-affine}
|
||||
POSTGRES_INITDB_ARGS: '--data-checksums'
|
||||
# you better set a password for you database
|
||||
# or you may add 'POSTGRES_HOST_AUTH_METHOD=trust' to ignore postgres security policy
|
||||
POSTGRES_HOST_AUTH_METHOD: trust
|
||||
healthcheck:
|
||||
test:
|
||||
['CMD', 'pg_isready', '-U', "${DB_USERNAME}", '-d', "${DB_DATABASE:-affine}"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
restart: always
|
||||
networks:
|
||||
1panel-network:
|
||||
external: true
|
||||
36
apps/alist/3.45.0/data.yml
Normal file
@@ -0,0 +1,36 @@
|
||||
additionalProperties:
|
||||
formFields:
|
||||
- default: 5244
|
||||
edit: true
|
||||
envKey: PANEL_APP_PORT_HTTP
|
||||
labelEn: WebUI Port
|
||||
labelZh: 网页端口
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
label:
|
||||
en: WebUI Port
|
||||
ja: WebUI ポート
|
||||
ms: Port WebUI
|
||||
pt-br: Porta WebUI
|
||||
ru: Порт WebUI
|
||||
ko: WebUI 포트
|
||||
zh-Hant: WebUI 埠
|
||||
zh: WebUI 端口
|
||||
- default: 5426
|
||||
edit: true
|
||||
envKey: PANEL_APP_PORT_S3
|
||||
labelEn: S3 Port
|
||||
labelZh: S3 端口
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
label:
|
||||
en: S3 Port
|
||||
ja: S3 ポート
|
||||
ms: Port S3
|
||||
pt-br: Porta S3
|
||||
ru: Порт S3
|
||||
ko: S3 포트
|
||||
zh-Hant: S3 埠
|
||||
zh: S3 端口
|
||||
0
apps/alist/3.45.0/data/data/.gitkeep
Normal file
0
apps/alist/3.45.0/data/mnt/.gitkeep
Normal file
23
apps/alist/3.45.0/docker-compose.yml
Normal file
@@ -0,0 +1,23 @@
|
||||
services:
|
||||
alist:
|
||||
container_name: ${CONTAINER_NAME}
|
||||
restart: always
|
||||
networks:
|
||||
- 1panel-network
|
||||
ports:
|
||||
- "${PANEL_APP_PORT_HTTP}:5244"
|
||||
- "${PANEL_APP_PORT_S3}:5426"
|
||||
volumes:
|
||||
- ./data/data:/opt/alist/data
|
||||
- ./data/mnt:/mnt/data
|
||||
environment:
|
||||
- PUID=0
|
||||
- PGID=0
|
||||
- UMASK=022
|
||||
image: xhofe/alist:v3.45.0
|
||||
labels:
|
||||
createdBy: "Apps"
|
||||
networks:
|
||||
1panel-network:
|
||||
external: true
|
||||
|
||||
12
apps/alist/3.45.0/scripts/upgrade.sh
Normal file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [[ -f ./.env ]]; then
|
||||
if grep -q 'PANEL_APP_PORT_S3' ./.env; then
|
||||
echo "PANEL_APP_PORT_S3 参数已存在"
|
||||
else
|
||||
echo 'PANEL_APP_PORT_S3=5426' >> ./.env
|
||||
echo "已添加 PANEL_APP_PORT_S3=5426"
|
||||
fi
|
||||
else
|
||||
echo ".env 文件不存在"
|
||||
fi
|
||||
45
apps/alist/README.md
Normal file
@@ -0,0 +1,45 @@
|
||||
## 申明
|
||||
由于原Alist项目所有者、文档、代码以及社群都被大幅度修改,原开发者Xhofe已经离开,当前最后版本为v3.45.0,基于此版本进行封存;后续基于开源社区维护OpenList进行更新;安装时,请选择openlist版本。
|
||||
|
||||
# 账号密码
|
||||
|
||||
容器列表点击 `终端` 按钮,进入容器内执行命令设置密码。
|
||||
|
||||
- **生成随机密码**:`./alist admin random`
|
||||
- **手动设置密码**:`./alist admin set NEW_PASSWORD`
|
||||
|
||||
# AList
|
||||
|
||||
AList 是一个支持多种存储,支持网页浏览和 WebDAV 的文件列表程序,由 gin 和 Solidjs 驱动。
|
||||
|
||||
## 支持的存储:
|
||||
|
||||
- 本地存储
|
||||
- [阿里云盘](https://www.aliyundrive.com/)
|
||||
- OneDrive / Sharepoint([国际版](https://www.office.com/), [世纪互联](https://portal.partner.microsoftonline.cn),de,us)
|
||||
- [天翼云盘](https://cloud.189.cn) (个人云, 家庭云)
|
||||
- [GoogleDrive](https://drive.google.com/)
|
||||
- [123云盘](https://www.123pan.com/)
|
||||
- FTP / SFTP
|
||||
- [PikPak](https://www.mypikpak.com/)
|
||||
- [S3](https://aws.amazon.com/cn/s3/)
|
||||
- [Seafile](https://seafile.com/)
|
||||
- [又拍云对象存储](https://www.upyun.com/products/file-storage)
|
||||
- WebDav(支持无API的OneDrive/SharePoint)
|
||||
- Teambition([中国](https://www.teambition.com/ ),[国际](https://us.teambition.com/ ))
|
||||
- [分秒帧](https://www.mediatrack.cn/)
|
||||
- [和彩云](https://yun.139.com/) (个人云, 家庭云)
|
||||
- [Yandex.Disk](https://disk.yandex.com/)
|
||||
- [百度网盘](http://pan.baidu.com/)
|
||||
- [UC网盘](https://drive.uc.cn)
|
||||
- [夸克网盘](https://pan.quark.cn)
|
||||
- [迅雷网盘](https://pan.xunlei.com)
|
||||
- [蓝奏云](https://www.lanzou.com/)
|
||||
- [阿里云盘分享](https://www.aliyundrive.com/)
|
||||
- [谷歌相册](https://photos.google.com/)
|
||||
- [Mega.nz](https://mega.nz)
|
||||
- [一刻相册](https://photo.baidu.com/)
|
||||
- SMB
|
||||
- [115](https://115.com/)
|
||||
- Cloudreve
|
||||
- [Dropbox](https://www.dropbox.com/)
|
||||
64
apps/alist/README_en.md
Normal file
@@ -0,0 +1,64 @@
|
||||
# AList
|
||||
|
||||
A file list program that supports multiple storage, and supports web browsing and webdav, powered by gin and Solidjs.
|
||||
|
||||
## Supported Storage
|
||||
|
||||
- Local storage
|
||||
- [Crypt](/guide/drivers/Crypt.md)
|
||||
- [Aliyundrive Open](../guide/drivers/aliyundrive_open.md)
|
||||
- [aliyundrive](https://www.alipan.com/)
|
||||
- [OneDrive](./drivers/onedrive.md) /[APP](./drivers/onedrive_app.md)/ Sharepoint ([global](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)
|
||||
- [GoogleDrive](https://drive.google.com/)
|
||||
- [123pan/Share/Link](https://www.123pan.com/)
|
||||
- [Alist](https://github.com/Xhofe/alist)
|
||||
- FTP
|
||||
- SFTP
|
||||
- [PikPak / share](https://www.mypikpak.com/)
|
||||
- [S3](../guide/drivers/s3.md)
|
||||
- [Doge](../guide/drivers/s3.md#add-object-storage-examples-and-official-documents)
|
||||
- [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
|
||||
- WebDAV
|
||||
- Teambition([China](https://www.teambition.com/),[International](https://us.teambition.com/))
|
||||
- [mediatrack](https://www.mediatrack.cn/)
|
||||
- [189cloud](https://cloud.189.cn) (Personal, Family)
|
||||
- [139yun](https://yun.139.com/) (Personal, Family)
|
||||
- [Wopan](https://pan.wo.cn)
|
||||
- [MoPan](https://mopan.sc.189.cn/mopan/#/downloadPc)
|
||||
- [YandexDisk](https://disk.yandex.com/)
|
||||
- [BaiduNetdisk](https://pan.baidu.com/) / [share](./drivers/baidu_share.md)
|
||||
- [Quark/TV](https://pan.quark.cn/)
|
||||
- [Thunder / X Browser](../guide/drivers/thunder.md)
|
||||
- [Lanzou](https://www.lanzou.com/)、[NewLanzou](https://www.ilanzou.com)
|
||||
- [Feiji Cloud](https://feijipan.com/)
|
||||
- [Aliyundrive share](https://www.alipan.com/)
|
||||
- [Google photo](https://photos.google.com/)
|
||||
- [Mega.nz](https://mega.nz)
|
||||
- [Baidu photo](https://photo.baidu.com/)
|
||||
- [TeraBox](https://www.terabox.com/)
|
||||
- [AList v2/v3](../guide/drivers/Alist%20V2%20V3.md)
|
||||
- SMB
|
||||
- [alias](../guide/advanced/alias.md)
|
||||
- [115](https://115.com/)
|
||||
- [Seafile](https://www.seafile.com/)
|
||||
- Cloudreve
|
||||
- [Trainbit](https://trainbit.com/)
|
||||
- [UrlTree](../guide/drivers/UrlTree.md)
|
||||
- IPFS
|
||||
- [UC Clouddrive/TV](https://drive.uc.cn/)
|
||||
- [Dropbox](https://www.dropbox.com)
|
||||
- [Tencent weiyun](https://www.weiyun.com/)
|
||||
- [vtencent](https://app.v.tencent.com/)
|
||||
- [ChaoxingGroupCloud](../guide/drivers/chaoxing.md)
|
||||
- [Quqi Cloud](https://quqi.com)
|
||||
- [163 Music Drive](../guide/drivers/163music.md)
|
||||
- [halalcloud](../guide/drivers/halalcloud.md)
|
||||
- [LenovoNasShare](https://pc.lenovo.com.cn)
|
||||
|
||||
## Account Password
|
||||
|
||||
Click the `Terminal` button in the container list to enter the container and execute commands to set the password.
|
||||
|
||||
- **Use a random password**: `./alist admin random`
|
||||
- **Or set password manually**: `./alist admin set NEW_PASSWORD`
|
||||
|
||||
37
apps/alist/data.yml
Normal file
@@ -0,0 +1,37 @@
|
||||
name: AList
|
||||
tags:
|
||||
- 实用工具
|
||||
- 云存储
|
||||
title: 支持多存储的文件列表程序和私人网盘
|
||||
description: 支持多存储的文件列表程序和私人网盘
|
||||
additionalProperties:
|
||||
key: alist
|
||||
name: AList
|
||||
tags:
|
||||
- Storage
|
||||
- Tool
|
||||
shortDescZh: 支持多存储的文件列表程序和私人网盘
|
||||
shortDescEn: Supporting multi-storage file listing program and private cloud storage
|
||||
description:
|
||||
en: Supporting multi-storage file listing program and private cloud storage
|
||||
ja: 複数ストレージのファイルリスト表示プログラムとプライベートクラウドストレージのサポート
|
||||
ms: Menyokong program senarai fail multi-penyimpanan dan penyimpanan awan peribadi
|
||||
pt-br: Suporte para programa de listagem de arquivos em múltiplos armazenamentos e armazenamento em nuvem privado
|
||||
ru: Поддержка программы отображения файлов в нескольких хранилищах и частного облачного хранилища
|
||||
ko: 다중 저장소 파일 목록 프로그램 및 개인 클라우드 저장소 지원
|
||||
zh-Hant: 支援多存儲檔案列出程序和私人雲端空間
|
||||
zh: 支持多存储文件列出程序和私有云存储
|
||||
type: website
|
||||
crossVersionUpdate: true
|
||||
limit: 0
|
||||
recommend: 65
|
||||
website: https://alist.nn.ci/
|
||||
github: https://github.com/alist-org/alist
|
||||
document: https://alist.nn.ci/zh/guide/
|
||||
architectures:
|
||||
- amd64
|
||||
- arm64
|
||||
- arm/v7
|
||||
- arm/v6
|
||||
- s390x
|
||||
|
||||
BIN
apps/alist/logo.png
Normal file
|
After Width: | Height: | Size: 1.2 KiB |
40
apps/arcane/README.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Arcane
|
||||
|
||||
Arcane 是一款现代化、开源的Docker管理Web面板,支持容器、镜像、网络等一站式管理。
|
||||
|
||||
## 功能特性
|
||||
|
||||
- 现代化Web界面,操作简洁直观
|
||||
- 支持容器、镜像、网络、卷等Docker资源的可视化管理
|
||||
- 支持多平台和多架构
|
||||
- 支持堆栈(Stack)定义与管理
|
||||
- 数据和设置持久化存储于 `./data` 目录
|
||||
- 挂载Docker套接字,支持主机级管理
|
||||
|
||||
## 使用方法
|
||||
|
||||
1. 部署后访问 `http://服务器IP:3000` 进入Web管理界面
|
||||
2. 首次使用请根据界面提示初始化设置
|
||||
3. 数据目录:`./data`
|
||||
4. 挂载宿主机 `/var/run/docker.sock`,实现容器管理
|
||||
5. 环境变量 `PUBLIC_SESSION_SECRET`:用于会话加密,建议使用32位随机字符串,可在应用表单中自定义,默认值为 `arcane-session-4e2b8c7f9d1a6e3b2c5d7f8a1b0c9e6d`。如需更高安全性,可用 `openssl rand -base64 32` 生成。
|
||||
|
||||
### 账户密码
|
||||
|
||||
- 首次运行时,如果不存在用户,Arcane 会创建默认管理员用户。
|
||||
- **用户名:** `arcane`
|
||||
- **密码:** `arcane-admin`
|
||||
- 首次登录必须更改此密码。
|
||||
- 要添加用户:转到**设置 → 用户管理**,然后单击**创建用户**。填写用户名、显示名称、电子邮件和密码。
|
||||
|
||||
## 安全提醒
|
||||
|
||||
- 挂载Docker套接字(/var/run/docker.sock)会赋予容器主机级管理权限,请确保安全使用!
|
||||
- Arcane 目前为预发布软件,功能和界面可能会有较大变动。
|
||||
|
||||
## 相关链接
|
||||
|
||||
- [官方网站](https://arcane.ofkm.dev/)
|
||||
- [GitHub 项目](https://github.com/ofkm/arcane)
|
||||
- [官方文档](https://arcane.ofkm.dev/docs/)
|
||||
- [Docker Hub](https://ghcr.io/ofkm/arcane)
|
||||
40
apps/arcane/README_en.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Arcane
|
||||
|
||||
Arcane is a modern, open-source Docker management web panel for containers, images, networks and more.
|
||||
|
||||
## Features
|
||||
|
||||
- Modern web UI, clean and intuitive
|
||||
- Visual management for containers, images, networks, volumes, etc.
|
||||
- Multi-platform and multi-architecture support
|
||||
- Stack (compose) definition and management
|
||||
- Data and settings persist in `./data` directory
|
||||
- Mount Docker socket for host-level management
|
||||
|
||||
## Usage
|
||||
|
||||
1. After deployment, access `http://your-server-ip:3000` for the web UI
|
||||
2. Follow the initial setup instructions on first use
|
||||
3. Data directory: `./data`
|
||||
4. Mount host `/var/run/docker.sock` for container management
|
||||
5. Environment variable `PUBLIC_SESSION_SECRET`: Used for session encryption. It is recommended to use a 32-character random string. You can customize it in the app form. Default: `arcane-session-4e2b8c7f9d1a6e3b2c5d7f8a1b0c9e6d`. For higher security, generate with `openssl rand -base64 32`.
|
||||
|
||||
## Local User Management
|
||||
|
||||
- On first run, Arcane creates a default admin user if no users exist.
|
||||
- **Username:** `arcane`
|
||||
- **Password:** `arcane-admin`
|
||||
- You must change this password during onboarding.
|
||||
- To add users: Go to **Settings → User Management** and click **Create User**. Fill in username, display name, email, and password.
|
||||
|
||||
## Security Notice
|
||||
|
||||
- Mounting the Docker socket (`/var/run/docker.sock`) gives the container root-level access to the Docker host. Use with caution!
|
||||
- Arcane is pre-release software. Features and UI may change frequently.
|
||||
|
||||
## Links
|
||||
|
||||
- [Official Website](https://arcane.ofkm.dev/)
|
||||
- [GitHub Project](https://github.com/ofkm/arcane)
|
||||
- [Official Documentation](https://arcane.ofkm.dev/docs/)
|
||||
- [Docker Hub](https://ghcr.io/ofkm/arcane)
|
||||
31
apps/arcane/data.yml
Normal file
@@ -0,0 +1,31 @@
|
||||
name: Arcane
|
||||
tags:
|
||||
- 实用工具
|
||||
title: 现代化Docker管理面板
|
||||
description: Arcane 是一款现代化、开源的Docker管理Web面板,支持容器、镜像、网络等一站式管理
|
||||
additionalProperties:
|
||||
key: arcane
|
||||
name: Arcane
|
||||
tags:
|
||||
- Tool
|
||||
shortDescZh: 现代化Docker管理面板
|
||||
shortDescEn: Modern Docker management panel
|
||||
description:
|
||||
en: Arcane is a modern, open-source Docker management web panel for containers, images, networks and more
|
||||
ja: Arcaneはコンテナ、イメージ、ネットワークなどを一元管理できるモダンなオープンソースDocker管理Webパネルです
|
||||
ms: Arcane ialah panel web pengurusan Docker moden sumber terbuka untuk kontena, imej, rangkaian dan banyak lagi
|
||||
pt-br: Arcane é um painel web moderno e de código aberto para gerenciamento de Docker, incluindo containers, imagens, redes e mais
|
||||
ru: Arcane — это современная, открытая веб-панель управления Docker для контейнеров, образов, сетей и др.
|
||||
ko: Arcane는 컨테이너, 이미지, 네트워크 등을 위한 현대적이고 오픈 소스인 Docker 관리 웹 패널입니다
|
||||
zh-Hant: Arcane 是一款現代化、開源的 Docker 管理 Web 面板,支援容器、映像、網路等一站式管理
|
||||
zh: Arcane 是一款现代化、开源的Docker管理Web面板,支持容器、镜像、网络等一站式管理
|
||||
type: website
|
||||
crossVersionUpdate: true
|
||||
limit: 0
|
||||
recommend: 0
|
||||
website: https://arcane.ofkm.dev/
|
||||
github: https://github.com/ofkm/arcane
|
||||
document: https://arcane.ofkm.dev/docs/
|
||||
architectures:
|
||||
- amd64
|
||||
- arm64
|
||||
35
apps/arcane/latest/data.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
additionalProperties:
|
||||
formFields:
|
||||
- default: 3000
|
||||
edit: true
|
||||
envKey: PANEL_APP_PORT_HTTP
|
||||
labelEn: Web UI Port
|
||||
labelZh: Web界面端口
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
label:
|
||||
en: Web UI Port
|
||||
ja: Web UI ポート
|
||||
ms: Port UI Web
|
||||
pt-br: Porta da interface web
|
||||
ru: Веб-порт интерфейса
|
||||
ko: 웹 UI 포트
|
||||
zh-Hant: Web UI 埠
|
||||
zh: Web界面端口
|
||||
- default: "arcane-session-4e2b8c7f9d1a6e3b2c5d7f8a1b0c9e6d"
|
||||
edit: true
|
||||
envKey: PUBLIC_SESSION_SECRET
|
||||
labelEn: Session Secret
|
||||
labelZh: 会话密钥
|
||||
required: true
|
||||
type: text
|
||||
label:
|
||||
en: Session Secret
|
||||
ja: セッションシークレット
|
||||
ms: Rahsia Sesi
|
||||
pt-br: Segredo da sessão
|
||||
ru: Секрет сессии
|
||||
ko: 세션 시크릿
|
||||
zh-Hant: 會話密鑰
|
||||
zh: 会话密钥
|
||||
18
apps/arcane/latest/docker-compose.yml
Normal file
@@ -0,0 +1,18 @@
|
||||
services:
|
||||
arcane:
|
||||
image: ghcr.io/ofkm/arcane:latest
|
||||
container_name: ${CONTAINER_NAME}
|
||||
ports:
|
||||
- "${PANEL_APP_PORT_HTTP}:3000"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- ./data:/app/data
|
||||
environment:
|
||||
- APP_ENV=production
|
||||
- PUID=2000
|
||||
- PGID=2000
|
||||
- PUBLIC_SESSION_SECRET=${PUBLIC_SESSION_SECRET}
|
||||
restart: always
|
||||
networks:
|
||||
1panel-network:
|
||||
external: true
|
||||
BIN
apps/arcane/logo.png
Normal file
|
After Width: | Height: | Size: 67 KiB |
11
apps/blinko/1.0.3/.env.sample
Normal file
@@ -0,0 +1,11 @@
|
||||
CONTAINER_NAME="blinko"
|
||||
NEXTAUTH_SECRET="my_ultra_secure_nextauth_secret"
|
||||
NEXTAUTH_URL="http://1.2.3.4:1111"
|
||||
NEXT_PUBLIC_BASE_URL="http://1.2.3.4:1111"
|
||||
PANEL_APP_PORT_HTTP=1111
|
||||
PANEL_DB_HOST="postgresql"
|
||||
PANEL_DB_HOST_NAME="postgresql"
|
||||
PANEL_DB_NAME="blinko"
|
||||
PANEL_DB_PORT=5432
|
||||
PANEL_DB_USER="blinko"
|
||||
PANEL_DB_USER_PASSWORD="blinko"
|
||||
70
apps/blinko/1.0.3/data.yml
Normal file
@@ -0,0 +1,70 @@
|
||||
additionalProperties:
|
||||
formFields:
|
||||
- default: "1111"
|
||||
envKey: PANEL_APP_PORT_HTTP
|
||||
labelEn: HTTP Port
|
||||
labelZh: HTTP 端口
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
- default: "http://1.2.3.4:1111"
|
||||
envKey: NEXTAUTH_URL
|
||||
labelEn: NextAuth URL
|
||||
labelZh: 基本 URL
|
||||
required: true
|
||||
rule: paramExtUrl
|
||||
type: text
|
||||
- default: "http://1.2.3.4:1111"
|
||||
envKey: NEXT_PUBLIC_BASE_URL
|
||||
labelEn: Next Public Base URL
|
||||
labelZh: 公共基本 URL
|
||||
required: true
|
||||
rule: paramExtUrl
|
||||
type: text
|
||||
- default: "my_ultra_secure_nextauth_secret"
|
||||
envKey: NEXTAUTH_SECRET
|
||||
labelEn: NextAuth Secret
|
||||
labelZh: NextAuth 密钥
|
||||
random: true
|
||||
required: true
|
||||
rule: paramComplexity
|
||||
type: password
|
||||
- default: ""
|
||||
envKey: PANEL_DB_HOST
|
||||
key: postgresql
|
||||
labelEn: PostgreSQL Database Service
|
||||
labelZh: PostgreSQL 数据库服务
|
||||
required: true
|
||||
type: service
|
||||
- default: "5432"
|
||||
edit: true
|
||||
envKey: PANEL_DB_PORT
|
||||
labelEn: Database Port Number
|
||||
labelZh: 数据库端口号
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
- default: "blinko"
|
||||
envKey: PANEL_DB_NAME
|
||||
labelEn: Database
|
||||
labelZh: 数据库名
|
||||
random: true
|
||||
required: true
|
||||
rule: paramCommon
|
||||
type: text
|
||||
- default: "blinko"
|
||||
envKey: PANEL_DB_USER
|
||||
labelEn: User
|
||||
labelZh: 数据库用户
|
||||
random: true
|
||||
required: true
|
||||
rule: paramCommon
|
||||
type: text
|
||||
- default: "blinko"
|
||||
envKey: PANEL_DB_USER_PASSWORD
|
||||
labelEn: Password
|
||||
labelZh: 数据库用户密码
|
||||
random: true
|
||||
required: true
|
||||
rule: paramComplexity
|
||||
type: password
|
||||
35
apps/blinko/1.0.3/docker-compose.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
services:
|
||||
blinko:
|
||||
image: "blinkospace/blinko:1.0.3"
|
||||
container_name: ${CONTAINER_NAME}
|
||||
environment:
|
||||
NODE_ENV: production
|
||||
NEXTAUTH_URL: ${NEXTAUTH_URL}
|
||||
NEXT_PUBLIC_BASE_URL: ${NEXT_PUBLIC_BASE_URL}
|
||||
NEXTAUTH_SECRET: ${NEXTAUTH_SECRET}
|
||||
DATABASE_URL: postgresql://${PANEL_DB_USER}:${PANEL_DB_USER_PASSWORD}@${PANEL_DB_HOST}:${PANEL_DB_PORT}/${PANEL_DB_NAME}
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
volumes:
|
||||
- "./data:/app/.blinko"
|
||||
restart: always
|
||||
logging:
|
||||
options:
|
||||
max-size: "10m"
|
||||
max-file: "3"
|
||||
ports:
|
||||
- "${PANEL_APP_PORT_HTTP}:1111"
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:1111/"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
start_period: 30s
|
||||
networks:
|
||||
- 1panel-network
|
||||
labels:
|
||||
createdBy: "Apps"
|
||||
networks:
|
||||
1panel-network:
|
||||
external: true
|
||||
49
apps/blinko/README.md
Normal file
@@ -0,0 +1,49 @@
|
||||
|
||||
# Blinko - 开源、自托管的笔记应用
|
||||
|
||||
Blinko 是一个创新的开源项目,专为那些想要快速捕捉和组织灵感的人设计。它允许用户在灵感闪现的瞬间无缝记录想法,确保不错过任何创意火花。
|
||||
|
||||
[在线演示](https://demo.blinko.space) •
|
||||
[文档](https://docs.blinko.space/introduction) •
|
||||
|
||||
> 在线演示账号: username:blinko password:blinko
|
||||
|
||||
## 🚀主要特性
|
||||
- 🤖**AI 增强笔记检索**:通过 Blinko 的先进 AI 驱动的 RAG(检索增强生成)技术,你可以使用自然语言查询快速搜索和访问笔记,轻松找到所需内容。支持 OpenAI、Azure OpenAI 和 Ollama。
|
||||
|
||||
- 🔒**数据所有权**:你的隐私至关重要。所有笔记和数据都安全存储在你的自托管环境中,确保对信息的完全控制。
|
||||
|
||||
- 🚀**高效快速**:即时捕捉想法并以纯文本形式存储以便于访问,完全支持 Markdown 格式,便于快速排版和分享。
|
||||
|
||||
- 💡**轻量架构,多端支持**:基于 Tauri 构建的 Blinko 采用简洁轻量的架构,在保持卓越速度和效率的同时,并且支持Macos,Windows,Android,Linux等多平台。
|
||||
|
||||
- 🔓**开放协作**:作为开源项目,Blinko 欢迎社区贡献。所有代码都在 GitHub 上公开透明,培养协作和持续改进的精神。
|
||||
|
||||
## 🤖 AI 模型支持
|
||||
### OpenAI
|
||||
- 支持 OpenAI API
|
||||
- 支持自定义 API 地址
|
||||
- 支持 Azure OpenAI
|
||||
|
||||
### Ollama
|
||||
- 支持本地部署
|
||||
- 默认地址:http://127.0.0.1:11434
|
||||
- 支持所有 Ollama 模型
|
||||
- 完全免费,无需 API Key
|
||||
|
||||
## 📦快速开始(Docker Compose)
|
||||
|
||||
```bash
|
||||
curl -s https://raw.githubusercontent.com/blinko-space/blinko/main/install.sh | bash
|
||||
```
|
||||
|
||||
## 环境配置
|
||||
|
||||
您可以通过设置不同的选项在运行时配置Blinko服务器。这些选项既可以作为环境变量定义,也可以作为命令行参数在启动服务器时指定。如果两种方法同时使用,命令行参数将优先于环境变量。以下是可用的配置选项列表:
|
||||
|
||||
| 环境变量 | 描述 |
|
||||
|------------------------|------------------------------------------------|
|
||||
| NEXTAUTH_URL | 指定应用程序的基础URL,通常是部署站点的根URL,用于身份验证回调和重定向。 |
|
||||
| NEXT_PUBLIC_BASE_URL | 定义应用程序的公共基础URL,用作前端和API请求的基础路径。 |
|
||||
| NEXTAUTH_SECRET | 用于加密会话和身份验证令牌的密钥,以确保用户数据的安全性。 |
|
||||
| DATABASE_URL | 数据库连接URL,用于连接和访问Blinko的数据库。 |
|
||||
19
apps/blinko/data.yml
Normal file
@@ -0,0 +1,19 @@
|
||||
name: Blinko
|
||||
tags:
|
||||
- 实用工具
|
||||
title: 一款开源、自托管的个人笔记工具
|
||||
description: 一款开源、自托管的个人笔记工具
|
||||
additionalProperties:
|
||||
key: blinko
|
||||
name: Blinko
|
||||
tags:
|
||||
- Tool
|
||||
shortDescZh: 一款开源、自托管的个人笔记工具
|
||||
shortDescEn: An open-source, self-hosted personal note tool
|
||||
type: tool
|
||||
crossVersionUpdate: true
|
||||
limit: 0
|
||||
recommend: 0
|
||||
website: https://blinko-demo.vercel.app
|
||||
github: https://github.com/blinko-space/blinko
|
||||
document: https://blinko-doc.vercel.app/intro.html
|
||||
BIN
apps/blinko/logo.png
Normal file
|
After Width: | Height: | Size: 18 KiB |
24
apps/cup/README.md
Normal file
@@ -0,0 +1,24 @@
|
||||
# cup
|
||||
|
||||
Cup 是检查容器镜像更新最简单的方法.
|
||||
|
||||

|
||||
|
||||
### 特色✨
|
||||
- 速度超快。Cup 充分利用了您的 CPU 资源,并经过高度优化,带来闪电般的速度。在我的 Raspberry Pi 5 上,58 张图片的读取仅用了 3.7 秒!
|
||||
- 支持大多数注册中心,包括 Docker Hub、ghcr.io、Quay、lscr.io 甚至 Gitea(或衍生产品)
|
||||
- 不会耗尽任何速率限制。这正是我创建 Cup 的初衷。我觉得这个功能现在尤其重要,因为Docker Hub 正在降低未经身份验证用户的拉取限制。
|
||||
- 漂亮的 CLI 和 Web 界面,可随时检查您的容器。
|
||||
- 二进制文件非常小巧!撰写本文时,它只有 5.4 MB。无需再为如此简单的程序拉取 100 多 MB 的 Docker 镜像。
|
||||
- CLI 和 Web 界面均提供 JSON 输出,方便您将 Cup 连接到集成。它易于解析,并且可以轻松设置 Webhook 和美观的仪表板!
|
||||
|
||||
### 文档📘
|
||||
看看https://cup.sergi0g.dev/docs!
|
||||
|
||||
### 限制
|
||||
|
||||
```
|
||||
Cup 仍在开发中。它可能没有其他替代品那么多功能。如果其中某个功能对您来说非常重要,请考虑使用其他工具。
|
||||
|
||||
Cup 无法直接触发您的集成。如果您希望自动触发,请使用 What's up Docker。Cup 的设计初衷很简单。数据就在那里,您可以自行检索(例如,通过cup check -rcronjob 运行或定期/api/v3/json从服务器请求 URL)。
|
||||
```
|
||||
25
apps/cup/data.yml
Normal file
@@ -0,0 +1,25 @@
|
||||
name: cup
|
||||
tags:
|
||||
- 实用工具
|
||||
title: 自动检测 Docker 容器基础镜像的工具
|
||||
description: 自动检测 Docker 容器基础镜像的工具
|
||||
additionalProperties:
|
||||
key: cup
|
||||
name: cup
|
||||
tags:
|
||||
- Tool
|
||||
shortDescZh: 自动检测 Docker 容器基础镜像的工具
|
||||
shortDescEn: Docker container updates made easy
|
||||
type: website
|
||||
crossVersionUpdate: true
|
||||
limit: 1
|
||||
recommend: 0
|
||||
website: https://cup.sergi0g.dev/
|
||||
github: https://github.com/sergi0g/cup
|
||||
document: https://cup.sergi0g.dev/docs
|
||||
description:
|
||||
en: Docker container updates made easy
|
||||
zh: 自动检测 Docker 容器基础镜像的工具
|
||||
architectures:
|
||||
- amd64
|
||||
- arm64
|
||||
10
apps/cup/latest/data.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
additionalProperties:
|
||||
formFields:
|
||||
- default: "51230"
|
||||
edit: true
|
||||
envKey: PANEL_APP_PORT_HTTP
|
||||
labelEn: Service Port 8000
|
||||
labelZh: 服务端口 8000
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
17
apps/cup/latest/docker-compose.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
services:
|
||||
cup:
|
||||
image: ghcr.io/sergi0g/cup:latest
|
||||
container_name: ${CONTAINER_NAME}
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- ${PANEL_APP_PORT_HTTP}:8000
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
networks:
|
||||
- 1panel-network
|
||||
labels:
|
||||
createdBy: Apps
|
||||
command: serve
|
||||
networks:
|
||||
1panel-network:
|
||||
external: true
|
||||
BIN
apps/cup/logo.png
Normal file
|
After Width: | Height: | Size: 2.9 KiB |
31
apps/deepseek-free-api/README.md
Normal file
@@ -0,0 +1,31 @@
|
||||
### 工具介绍
|
||||
|
||||
🚀 DeepSeek-V3 & R1大模型逆向API【特长:良心厂商】(官方贼便宜,建议直接走官方),支持高速流式输出、多轮对话,联网搜索,R1深度思考,零配置部署,多路token支持,仅供测试,如需商用请前往官方开放平台。
|
||||
|
||||
### 风险说明
|
||||
|
||||
- 逆向API是不稳定的,建议前往DeepSeek官方 https://platform.deepseek.com/ 付费使用API,避免封禁的风险。
|
||||
|
||||
- 本组织和个人不接受任何资金捐助和交易,此项目是纯粹研究交流学习性质!
|
||||
|
||||
- 仅限自用,禁止对外提供服务或商用,避免对官方造成服务压力,否则风险自担!
|
||||
|
||||
### 使用说明
|
||||
|
||||
请确保您在中国境内或者拥有中国境内的个人计算设备,否则部署后可能因无法访问DeepSeek而无法使用。
|
||||
|
||||
从 [DeepSeek](https://chat.deepseek.com/) 获取userToken value
|
||||
|
||||
进入DeepSeek随便发起一个对话,然后F12打开开发者工具,从Application > LocalStorage中找到`userToken`中的value值,复制这个值填写到Lobechat或者CherryStudio等工具中,作为API密钥,API地址是你部署应用的IP加端口,例如:`https://192.168.1.105:8001/v1/chat/completions`,注意某些工具只需要填写`https://192.168.1.105:8001/`即可。
|
||||
|
||||
[](https://cdn.jsdelivr.net/gh/LLM-Red-Team/deepseek-free-api@master/doc/example-0.png)
|
||||
|
||||
### 多账号接入
|
||||
|
||||
目前同个账号同时只能有*一路*输出,你可以通过提供多个账号的userToken value并使用`,`拼接提供:
|
||||
|
||||
```
|
||||
API密钥:TOKEN1,TOKEN2,TOKEN3
|
||||
```
|
||||
|
||||
每次请求服务会从中挑选一个。
|
||||
24
apps/deepseek-free-api/data.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
name: Deepseek-Free-API
|
||||
tags:
|
||||
- AI / 大模型
|
||||
title: DeepSeek-V3 & R1大模型逆向API
|
||||
description: DeepSeek V3 Free 服务
|
||||
additionalProperties:
|
||||
key: deepseek-free-api
|
||||
name: Deepseek-Free-API
|
||||
tags:
|
||||
- AI
|
||||
- Tools
|
||||
shortDescZh: DeepSeek-V3 & R1大模型逆向API
|
||||
shortDescEn: A 1Panel deployment for deepseek-free-api
|
||||
type: website
|
||||
crossVersionUpdate: true
|
||||
limit: 0
|
||||
recommend: 0
|
||||
architectures:
|
||||
- amd64
|
||||
- arm64
|
||||
|
||||
website: https://platform.deepseek.com/
|
||||
github: https://github.com/LLM-Red-Team/deepseek-free-api
|
||||
document: https://github.com/LLM-Red-Team/deepseek-free-api
|
||||
10
apps/deepseek-free-api/latest/data.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
additionalProperties:
|
||||
formFields:
|
||||
- default: "8001"
|
||||
edit: true
|
||||
envKey: PANEL_APP_PORT_HTTP
|
||||
labelEn: Service Port
|
||||
labelZh: 服务端口
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
16
apps/deepseek-free-api/latest/docker-compose.yml
Normal file
@@ -0,0 +1,16 @@
|
||||
services:
|
||||
deepseek-free-api:
|
||||
image: vinlic/deepseek-free-api:latest
|
||||
container_name: ${CONTAINER_NAME}
|
||||
ports:
|
||||
- ${PANEL_APP_PORT_HTTP}:8000
|
||||
networks:
|
||||
- 1panel-network
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
labels:
|
||||
createdBy: Apps
|
||||
restart: always
|
||||
networks:
|
||||
1panel-network:
|
||||
external: true
|
||||
BIN
apps/deepseek-free-api/logo.png
Normal file
|
After Width: | Height: | Size: 38 KiB |
13
apps/demo/README.md
Normal file
@@ -0,0 +1,13 @@
|
||||
# 1Panel Apps
|
||||
|
||||
这是一款适配 1Panel 应用商店的通用应用模板,
|
||||
|
||||
旨在简化 Docker 应用的快速适配过程,让用户轻松将所需应用集成至 1Panel 应用商店。
|
||||
|
||||
它能够有效解决非商店应用无法使用 1Panel 快照和应用备份功能的问题。
|
||||
|
||||
## 使用说明
|
||||
|
||||
- 可以按需修改安装界面的参数
|
||||
|
||||
- 也可以直接忽视安装界面提供的参数,然后勾选`“高级设置”`,勾选`“编辑compose文件”`,使用自定义的 `docker-compose.yml`文件
|
||||
9
apps/demo/bridge-network/.env.sample
Normal file
@@ -0,0 +1,9 @@
|
||||
CONTAINER_NAME="1panel-apps"
|
||||
DATA_PATH="./data"
|
||||
DATA_PATH_INTERNAL="/data"
|
||||
ENV1=""
|
||||
IMAGE=""
|
||||
PANEL_APP_PORT_HTTP=40329
|
||||
PANEL_APP_PORT_HTTP_INTERNAL=40329
|
||||
RESTART_POLICY="always"
|
||||
TIME_ZONE="Asia/Shanghai"
|
||||
69
apps/demo/bridge-network/data.yml
Normal file
@@ -0,0 +1,69 @@
|
||||
additionalProperties:
|
||||
formFields:
|
||||
- default: ""
|
||||
edit: true
|
||||
envKey: IMAGE
|
||||
labelEn: Docker Image
|
||||
labelZh: Docker 镜像
|
||||
required: true
|
||||
type: text
|
||||
- default: "always"
|
||||
edit: true
|
||||
envKey: RESTART_POLICY
|
||||
labelEn: Restart Policy
|
||||
labelZh: 重启策略
|
||||
required: true
|
||||
type: select
|
||||
values:
|
||||
- label: "Always"
|
||||
value: "always"
|
||||
- label: "Unless Stopped"
|
||||
value: "unless-stopped"
|
||||
- label: "On Failure"
|
||||
value: "on-failure"
|
||||
- label: "No"
|
||||
value: "no"
|
||||
- default: "40329"
|
||||
edit: true
|
||||
envKey: PANEL_APP_PORT_HTTP
|
||||
labelEn: Port
|
||||
labelZh: 端口
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
- default: "40329"
|
||||
edit: true
|
||||
envKey: PANEL_APP_PORT_HTTP_INTERNAL
|
||||
labelEn: Internal Port
|
||||
labelZh: 内部端口
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
- default: "./data"
|
||||
edit: true
|
||||
envKey: DATA_PATH
|
||||
labelEn: Data Path
|
||||
labelZh: 数据路径
|
||||
required: true
|
||||
type: text
|
||||
- default: "/data"
|
||||
edit: true
|
||||
envKey: DATA_PATH_INTERNAL
|
||||
labelEn: Internal Data Path
|
||||
labelZh: 内部数据路径
|
||||
required: true
|
||||
type: text
|
||||
- default: "Asia/Shanghai"
|
||||
edit: true
|
||||
envKey: TIME_ZONE
|
||||
labelEn: Time Zone
|
||||
labelZh: 时区
|
||||
required: true
|
||||
type: text
|
||||
- default: ""
|
||||
edit: true
|
||||
envKey: ENV1
|
||||
labelEn: Environment Variable 1 (Edit to remove comments in compose.yml to take effect)
|
||||
labelZh: 环境变量 1 (编辑去除compose.yml里的注释生效)
|
||||
required: false
|
||||
type: text
|
||||
22
apps/demo/bridge-network/docker-compose.yml
Normal file
@@ -0,0 +1,22 @@
|
||||
services:
|
||||
1panel-apps:
|
||||
image: ${IMAGE}
|
||||
container_name: ${CONTAINER_NAME}
|
||||
restart: ${RESTART_POLICY}
|
||||
networks:
|
||||
- 1panel-network
|
||||
ports:
|
||||
- "${PANEL_APP_PORT_HTTP}:${PANEL_APP_PORT_HTTP_INTERNAL}"
|
||||
volumes:
|
||||
- "${DATA_PATH}:${DATA_PATH_INTERNAL}"
|
||||
environment:
|
||||
# 环境参数按需修改 (Modify the environment parameters as required)
|
||||
- TZ=${TIME_ZONE}
|
||||
# 删除以下行前的#号表示启用 (Delete the # sign in front of the following lines to indicate enablement)
|
||||
# - ${ENV1}=${ENV1}
|
||||
labels:
|
||||
createdBy: "Apps"
|
||||
|
||||
networks:
|
||||
1panel-network:
|
||||
external: true
|
||||
19
apps/demo/data.yml
Normal file
@@ -0,0 +1,19 @@
|
||||
name: 1Panel Apps
|
||||
tags:
|
||||
- 建站
|
||||
title: 适配 1Panel 应用商店的通用应用模板
|
||||
description: 适配 1Panel 应用商店的通用应用模板
|
||||
additionalProperties:
|
||||
key: 1panel-apps
|
||||
name: 1Panel Apps
|
||||
tags:
|
||||
- Website
|
||||
shortDescZh: 适配 1Panel 应用商店的通用应用模板
|
||||
shortDescEn: Universal app template for the 1Panel App Store
|
||||
type: website
|
||||
crossVersionUpdate: true
|
||||
limit: 0
|
||||
recommend: 0
|
||||
website: https://github.com/okxlin/appstore
|
||||
github: https://github.com/okxlin/appstore
|
||||
document: https://github.com/okxlin/appstore
|
||||
8
apps/demo/host-network/.env.sample
Normal file
@@ -0,0 +1,8 @@
|
||||
CONTAINER_NAME="1panel-apps"
|
||||
DATA_PATH="./data"
|
||||
DATA_PATH_INTERNAL="/data"
|
||||
ENV1=""
|
||||
IMAGE=""
|
||||
PANEL_APP_PORT_HTTP=40329
|
||||
RESTART_POLICY="always"
|
||||
TIME_ZONE="Asia/Shanghai"
|
||||
61
apps/demo/host-network/data.yml
Normal file
@@ -0,0 +1,61 @@
|
||||
additionalProperties:
|
||||
formFields:
|
||||
- default: ""
|
||||
edit: true
|
||||
envKey: IMAGE
|
||||
labelEn: Docker Image
|
||||
labelZh: Docker 镜像
|
||||
required: true
|
||||
type: text
|
||||
- default: "always"
|
||||
edit: true
|
||||
envKey: RESTART_POLICY
|
||||
labelEn: Restart Policy
|
||||
labelZh: 重启策略
|
||||
required: true
|
||||
type: select
|
||||
values:
|
||||
- label: "Always"
|
||||
value: "always"
|
||||
- label: "Unless Stopped"
|
||||
value: "unless-stopped"
|
||||
- label: "On Failure"
|
||||
value: "on-failure"
|
||||
- label: "No"
|
||||
value: "no"
|
||||
- default: "40329"
|
||||
edit: true
|
||||
envKey: PANEL_APP_PORT_HTTP
|
||||
labelEn: Port (determined by the Docker application itself)
|
||||
labelZh: 端口 (由 Docker 应用自身决定)
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
- default: "./data"
|
||||
edit: true
|
||||
envKey: DATA_PATH
|
||||
labelEn: Data Path
|
||||
labelZh: 数据路径
|
||||
required: true
|
||||
type: text
|
||||
- default: "/data"
|
||||
edit: true
|
||||
envKey: DATA_PATH_INTERNAL
|
||||
labelEn: Internal Data Path
|
||||
labelZh: 内部数据路径
|
||||
required: true
|
||||
type: text
|
||||
- default: "Asia/Shanghai"
|
||||
edit: true
|
||||
envKey: TIME_ZONE
|
||||
labelEn: Time Zone
|
||||
labelZh: 时区
|
||||
required: true
|
||||
type: text
|
||||
- default: ""
|
||||
edit: true
|
||||
envKey: ENV1
|
||||
labelEn: Environment Variable 1 (Edit to remove comments in compose.yml to take effect)
|
||||
labelZh: 环境变量 1 (编辑去除compose.yml里的注释生效)
|
||||
required: false
|
||||
type: text
|
||||
15
apps/demo/host-network/docker-compose.yml
Normal file
@@ -0,0 +1,15 @@
|
||||
services:
|
||||
1panel-apps:
|
||||
image: ${IMAGE}
|
||||
container_name: ${CONTAINER_NAME}
|
||||
restart: ${RESTART_POLICY}
|
||||
network_mode: host
|
||||
volumes:
|
||||
- "${DATA_PATH}:${DATA_PATH_INTERNAL}"
|
||||
environment:
|
||||
# 环境参数按需修改 (Modify the environment parameters as required)
|
||||
- TZ=${TIME_ZONE}
|
||||
# 删除以下行前的#号表示启用 (Delete the # sign in front of the following lines to indicate enablement)
|
||||
# - ${ENV1}=${ENV1}
|
||||
labels:
|
||||
createdBy: "Apps"
|
||||
BIN
apps/demo/logo.png
Normal file
|
After Width: | Height: | Size: 4.7 KiB |
76
apps/dify/1.1.1/conf/certbot/README.md
Normal file
@@ -0,0 +1,76 @@
|
||||
# Launching new servers with SSL certificates
|
||||
|
||||
## Short description
|
||||
|
||||
docker compose certbot configurations with Backward compatibility (without certbot container).
|
||||
Use `docker compose --profile certbot up` to use this features.
|
||||
|
||||
## The simplest way for launching new servers with SSL certificates
|
||||
|
||||
1. Get letsencrypt certs
|
||||
set `.env` values
|
||||
```properties
|
||||
NGINX_SSL_CERT_FILENAME=fullchain.pem
|
||||
NGINX_SSL_CERT_KEY_FILENAME=privkey.pem
|
||||
NGINX_ENABLE_CERTBOT_CHALLENGE=true
|
||||
CERTBOT_DOMAIN=your_domain.com
|
||||
CERTBOT_EMAIL=example@your_domain.com
|
||||
```
|
||||
execute command:
|
||||
```shell
|
||||
docker network prune
|
||||
docker compose --profile certbot up --force-recreate -d
|
||||
```
|
||||
then after the containers launched:
|
||||
```shell
|
||||
docker compose exec -it certbot /bin/sh /update-cert.sh
|
||||
```
|
||||
2. Edit `.env` file and `docker compose --profile certbot up` again.
|
||||
set `.env` value additionally
|
||||
```properties
|
||||
NGINX_HTTPS_ENABLED=true
|
||||
```
|
||||
execute command:
|
||||
```shell
|
||||
docker compose --profile certbot up -d --no-deps --force-recreate nginx
|
||||
```
|
||||
Then you can access your serve with HTTPS.
|
||||
[https://your_domain.com](https://your_domain.com)
|
||||
|
||||
## SSL certificates renewal
|
||||
|
||||
For SSL certificates renewal, execute commands below:
|
||||
|
||||
```shell
|
||||
docker compose exec -it certbot /bin/sh /update-cert.sh
|
||||
docker compose exec nginx nginx -s reload
|
||||
```
|
||||
|
||||
## Options for certbot
|
||||
|
||||
`CERTBOT_OPTIONS` key might be helpful for testing. i.e.,
|
||||
|
||||
```properties
|
||||
CERTBOT_OPTIONS=--dry-run
|
||||
```
|
||||
|
||||
To apply changes to `CERTBOT_OPTIONS`, regenerate the certbot container before updating the certificates.
|
||||
|
||||
```shell
|
||||
docker compose --profile certbot up -d --no-deps --force-recreate certbot
|
||||
docker compose exec -it certbot /bin/sh /update-cert.sh
|
||||
```
|
||||
|
||||
Then, reload the nginx container if necessary.
|
||||
|
||||
```shell
|
||||
docker compose exec nginx nginx -s reload
|
||||
```
|
||||
|
||||
## For legacy servers
|
||||
|
||||
To use cert files dir `nginx/ssl` as before, simply launch containers WITHOUT `--profile certbot` option.
|
||||
|
||||
```shell
|
||||
docker compose up -d
|
||||
```
|
||||
30
apps/dify/1.1.1/conf/certbot/docker-entrypoint.sh
Normal file
@@ -0,0 +1,30 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
printf '%s\n' "Docker entrypoint script is running"
|
||||
|
||||
printf '%s\n' "\nChecking specific environment variables:"
|
||||
printf '%s\n' "CERTBOT_EMAIL: ${CERTBOT_EMAIL:-Not set}"
|
||||
printf '%s\n' "CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-Not set}"
|
||||
printf '%s\n' "CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-Not set}"
|
||||
|
||||
printf '%s\n' "\nChecking mounted directories:"
|
||||
for dir in "/etc/letsencrypt" "/var/www/html" "/var/log/letsencrypt"; do
|
||||
if [ -d "$dir" ]; then
|
||||
printf '%s\n' "$dir exists. Contents:"
|
||||
ls -la "$dir"
|
||||
else
|
||||
printf '%s\n' "$dir does not exist."
|
||||
fi
|
||||
done
|
||||
|
||||
printf '%s\n' "\nGenerating update-cert.sh from template"
|
||||
sed -e "s|\${CERTBOT_EMAIL}|$CERTBOT_EMAIL|g" \
|
||||
-e "s|\${CERTBOT_DOMAIN}|$CERTBOT_DOMAIN|g" \
|
||||
-e "s|\${CERTBOT_OPTIONS}|$CERTBOT_OPTIONS|g" \
|
||||
/update-cert.template.txt > /update-cert.sh
|
||||
|
||||
chmod +x /update-cert.sh
|
||||
|
||||
printf '%s\n' "\nExecuting command:" "$@"
|
||||
exec "$@"
|
||||
19
apps/dify/1.1.1/conf/certbot/update-cert.template.txt
Normal file
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
DOMAIN="${CERTBOT_DOMAIN}"
|
||||
EMAIL="${CERTBOT_EMAIL}"
|
||||
OPTIONS="${CERTBOT_OPTIONS}"
|
||||
CERT_NAME="${DOMAIN}" # 証明書名をドメイン名と同じにする
|
||||
|
||||
# Check if the certificate already exists
|
||||
if [ -f "/etc/letsencrypt/renewal/${CERT_NAME}.conf" ]; then
|
||||
echo "Certificate exists. Attempting to renew..."
|
||||
certbot renew --noninteractive --cert-name ${CERT_NAME} --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email ${OPTIONS}
|
||||
else
|
||||
echo "Certificate does not exist. Obtaining a new certificate..."
|
||||
certbot certonly --noninteractive --webroot --webroot-path=/var/www/html --email ${EMAIL} --agree-tos --no-eff-email -d ${DOMAIN} ${OPTIONS}
|
||||
fi
|
||||
echo "Certificate operation successful"
|
||||
# Note: Nginx reload should be handled outside this container
|
||||
echo "Please ensure to reload Nginx to apply any certificate changes."
|
||||
4
apps/dify/1.1.1/conf/couchbase-server/Dockerfile
Normal file
@@ -0,0 +1,4 @@
|
||||
FROM couchbase/server:latest AS stage_base
|
||||
# FROM couchbase:latest AS stage_base
|
||||
COPY init-cbserver.sh /opt/couchbase/init/
|
||||
RUN chmod +x /opt/couchbase/init/init-cbserver.sh
|
||||
44
apps/dify/1.1.1/conf/couchbase-server/init-cbserver.sh
Normal file
@@ -0,0 +1,44 @@
|
||||
#!/bin/bash
|
||||
# used to start couchbase server - can't get around this as docker compose only allows you to start one command - so we have to start couchbase like the standard couchbase Dockerfile would
|
||||
# https://github.com/couchbase/docker/blob/master/enterprise/couchbase-server/7.2.0/Dockerfile#L88
|
||||
|
||||
/entrypoint.sh couchbase-server &
|
||||
|
||||
# track if setup is complete so we don't try to setup again
|
||||
FILE=/opt/couchbase/init/setupComplete.txt
|
||||
|
||||
if ! [ -f "$FILE" ]; then
|
||||
# used to automatically create the cluster based on environment variables
|
||||
# https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-cluster-init.html
|
||||
|
||||
echo $COUCHBASE_ADMINISTRATOR_USERNAME ":" $COUCHBASE_ADMINISTRATOR_PASSWORD
|
||||
|
||||
sleep 20s
|
||||
/opt/couchbase/bin/couchbase-cli cluster-init -c 127.0.0.1 \
|
||||
--cluster-username $COUCHBASE_ADMINISTRATOR_USERNAME \
|
||||
--cluster-password $COUCHBASE_ADMINISTRATOR_PASSWORD \
|
||||
--services data,index,query,fts \
|
||||
--cluster-ramsize $COUCHBASE_RAM_SIZE \
|
||||
--cluster-index-ramsize $COUCHBASE_INDEX_RAM_SIZE \
|
||||
--cluster-eventing-ramsize $COUCHBASE_EVENTING_RAM_SIZE \
|
||||
--cluster-fts-ramsize $COUCHBASE_FTS_RAM_SIZE \
|
||||
--index-storage-setting default
|
||||
|
||||
sleep 2s
|
||||
|
||||
# used to auto create the bucket based on environment variables
|
||||
# https://docs.couchbase.com/server/current/cli/cbcli/couchbase-cli-bucket-create.html
|
||||
|
||||
/opt/couchbase/bin/couchbase-cli bucket-create -c localhost:8091 \
|
||||
--username $COUCHBASE_ADMINISTRATOR_USERNAME \
|
||||
--password $COUCHBASE_ADMINISTRATOR_PASSWORD \
|
||||
--bucket $COUCHBASE_BUCKET \
|
||||
--bucket-ramsize $COUCHBASE_BUCKET_RAMSIZE \
|
||||
--bucket-type couchbase
|
||||
|
||||
# create file so we know that the cluster is setup and don't run the setup again
|
||||
touch $FILE
|
||||
fi
|
||||
# docker compose will stop the container from running unless we do this
|
||||
# known issue and workaround
|
||||
tail -f /dev/null
|
||||
25
apps/dify/1.1.1/conf/elasticsearch/docker-entrypoint.sh
Normal file
@@ -0,0 +1,25 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
if [ "${VECTOR_STORE}" = "elasticsearch-ja" ]; then
|
||||
# Check if the ICU tokenizer plugin is installed
|
||||
if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-icu; then
|
||||
printf '%s\n' "Installing the ICU tokenizer plugin"
|
||||
if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-icu; then
|
||||
printf '%s\n' "Failed to install the ICU tokenizer plugin"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
# Check if the Japanese language analyzer plugin is installed
|
||||
if ! /usr/share/elasticsearch/bin/elasticsearch-plugin list | grep -q analysis-kuromoji; then
|
||||
printf '%s\n' "Installing the Japanese language analyzer plugin"
|
||||
if ! /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-kuromoji; then
|
||||
printf '%s\n' "Failed to install the Japanese language analyzer plugin"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Run the original entrypoint script
|
||||
exec /bin/tini -- /usr/local/bin/docker-entrypoint.sh
|
||||
48
apps/dify/1.1.1/conf/nginx/conf.d/default.conf.template
Normal file
@@ -0,0 +1,48 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
server {
|
||||
listen ${NGINX_PORT};
|
||||
server_name ${NGINX_SERVER_NAME};
|
||||
|
||||
location /console/api {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /api {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /v1 {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /files {
|
||||
proxy_pass http://api:5001;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /explore {
|
||||
proxy_pass http://web:3000;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location /e/ {
|
||||
proxy_pass http://plugin_daemon:5002;
|
||||
proxy_set_header Dify-Hook-Url $scheme://$host$request_uri;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
location / {
|
||||
proxy_pass http://web:3000;
|
||||
include proxy.conf;
|
||||
}
|
||||
|
||||
# placeholder for acme challenge location
|
||||
${ACME_CHALLENGE_LOCATION}
|
||||
|
||||
# placeholder for https config defined in https.conf.template
|
||||
${HTTPS_CONFIG}
|
||||
}
|
||||
39
apps/dify/1.1.1/conf/nginx/docker-entrypoint.sh
Normal file
@@ -0,0 +1,39 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ "${NGINX_HTTPS_ENABLED}" = "true" ]; then
|
||||
# Check if the certificate and key files for the specified domain exist
|
||||
if [ -n "${CERTBOT_DOMAIN}" ] && \
|
||||
[ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}" ] && \
|
||||
[ -f "/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}" ]; then
|
||||
SSL_CERTIFICATE_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_FILENAME}"
|
||||
SSL_CERTIFICATE_KEY_PATH="/etc/letsencrypt/live/${CERTBOT_DOMAIN}/${NGINX_SSL_CERT_KEY_FILENAME}"
|
||||
else
|
||||
SSL_CERTIFICATE_PATH="/etc/ssl/${NGINX_SSL_CERT_FILENAME}"
|
||||
SSL_CERTIFICATE_KEY_PATH="/etc/ssl/${NGINX_SSL_CERT_KEY_FILENAME}"
|
||||
fi
|
||||
export SSL_CERTIFICATE_PATH
|
||||
export SSL_CERTIFICATE_KEY_PATH
|
||||
|
||||
# set the HTTPS_CONFIG environment variable to the content of the https.conf.template
|
||||
HTTPS_CONFIG=$(envsubst < /etc/nginx/https.conf.template)
|
||||
export HTTPS_CONFIG
|
||||
# Substitute the HTTPS_CONFIG in the default.conf.template with content from https.conf.template
|
||||
envsubst '${HTTPS_CONFIG}' < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
|
||||
fi
|
||||
|
||||
if [ "${NGINX_ENABLE_CERTBOT_CHALLENGE}" = "true" ]; then
|
||||
ACME_CHALLENGE_LOCATION='location /.well-known/acme-challenge/ { root /var/www/html; }'
|
||||
else
|
||||
ACME_CHALLENGE_LOCATION=''
|
||||
fi
|
||||
export ACME_CHALLENGE_LOCATION
|
||||
|
||||
env_vars=$(printenv | cut -d= -f1 | sed 's/^/$/g' | paste -sd, -)
|
||||
|
||||
envsubst "$env_vars" < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf
|
||||
envsubst "$env_vars" < /etc/nginx/proxy.conf.template > /etc/nginx/proxy.conf
|
||||
|
||||
envsubst < /etc/nginx/conf.d/default.conf.template > /etc/nginx/conf.d/default.conf
|
||||
|
||||
# Start Nginx using the default entrypoint
|
||||
exec nginx -g 'daemon off;'
|
||||
9
apps/dify/1.1.1/conf/nginx/https.conf.template
Normal file
@@ -0,0 +1,9 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
listen ${NGINX_SSL_PORT} ssl;
|
||||
ssl_certificate ${SSL_CERTIFICATE_PATH};
|
||||
ssl_certificate_key ${SSL_CERTIFICATE_KEY_PATH};
|
||||
ssl_protocols ${NGINX_SSL_PROTOCOLS};
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_session_cache shared:SSL:10m;
|
||||
ssl_session_timeout 10m;
|
||||
34
apps/dify/1.1.1/conf/nginx/nginx.conf.template
Normal file
@@ -0,0 +1,34 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
user nginx;
|
||||
worker_processes ${NGINX_WORKER_PROCESSES};
|
||||
|
||||
error_log /var/log/nginx/error.log notice;
|
||||
pid /var/run/nginx.pid;
|
||||
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
|
||||
http {
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||
'$status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||
|
||||
access_log /var/log/nginx/access.log main;
|
||||
|
||||
sendfile on;
|
||||
#tcp_nopush on;
|
||||
|
||||
keepalive_timeout ${NGINX_KEEPALIVE_TIMEOUT};
|
||||
|
||||
#gzip on;
|
||||
client_max_body_size ${NGINX_CLIENT_MAX_BODY_SIZE};
|
||||
|
||||
include /etc/nginx/conf.d/*.conf;
|
||||
}
|
||||
11
apps/dify/1.1.1/conf/nginx/proxy.conf.template
Normal file
@@ -0,0 +1,11 @@
|
||||
# Please do not directly edit this file. Instead, modify the .env variables related to NGINX configuration.
|
||||
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Connection "";
|
||||
proxy_buffering off;
|
||||
proxy_read_timeout ${NGINX_PROXY_READ_TIMEOUT};
|
||||
proxy_send_timeout ${NGINX_PROXY_SEND_TIMEOUT};
|
||||
0
apps/dify/1.1.1/conf/nginx/ssl/.gitkeep
Normal file
42
apps/dify/1.1.1/conf/ssrf_proxy/docker-entrypoint.sh
Normal file
@@ -0,0 +1,42 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Modified based on Squid OCI image entrypoint
|
||||
|
||||
# This entrypoint aims to forward the squid logs to stdout to assist users of
|
||||
# common container related tooling (e.g., kubernetes, docker-compose, etc) to
|
||||
# access the service logs.
|
||||
|
||||
# Moreover, it invokes the squid binary, leaving all the desired parameters to
|
||||
# be provided by the "command" passed to the spawned container. If no command
|
||||
# is provided by the user, the default behavior (as per the CMD statement in
|
||||
# the Dockerfile) will be to use Ubuntu's default configuration [1] and run
|
||||
# squid with the "-NYC" options to mimic the behavior of the Ubuntu provided
|
||||
# systemd unit.
|
||||
|
||||
# [1] The default configuration is changed in the Dockerfile to allow local
|
||||
# network connections. See the Dockerfile for further information.
|
||||
|
||||
echo "[ENTRYPOINT] re-create snakeoil self-signed certificate removed in the build process"
|
||||
if [ ! -f /etc/ssl/private/ssl-cert-snakeoil.key ]; then
|
||||
/usr/sbin/make-ssl-cert generate-default-snakeoil --force-overwrite > /dev/null 2>&1
|
||||
fi
|
||||
|
||||
tail -F /var/log/squid/access.log 2>/dev/null &
|
||||
tail -F /var/log/squid/error.log 2>/dev/null &
|
||||
tail -F /var/log/squid/store.log 2>/dev/null &
|
||||
tail -F /var/log/squid/cache.log 2>/dev/null &
|
||||
|
||||
# Replace environment variables in the template and output to the squid.conf
|
||||
echo "[ENTRYPOINT] replacing environment variables in the template"
|
||||
awk '{
|
||||
while(match($0, /\${[A-Za-z_][A-Za-z_0-9]*}/)) {
|
||||
var = substr($0, RSTART+2, RLENGTH-3)
|
||||
val = ENVIRON[var]
|
||||
$0 = substr($0, 1, RSTART-1) val substr($0, RSTART+RLENGTH)
|
||||
}
|
||||
print
|
||||
}' /etc/squid/squid.conf.template > /etc/squid/squid.conf
|
||||
|
||||
/usr/sbin/squid -Nz
|
||||
echo "[ENTRYPOINT] starting squid"
|
||||
/usr/sbin/squid -f /etc/squid/squid.conf -NYC 1
|
||||
51
apps/dify/1.1.1/conf/ssrf_proxy/squid.conf.template
Normal file
@@ -0,0 +1,51 @@
|
||||
acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN)
|
||||
acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN)
|
||||
acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN)
|
||||
acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines
|
||||
acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN)
|
||||
acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN)
|
||||
acl localnet src fc00::/7 # RFC 4193 local private network range
|
||||
acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
|
||||
acl SSL_ports port 443
|
||||
# acl SSL_ports port 1025-65535 # Enable the configuration to resolve this issue: https://github.com/langgenius/dify/issues/12792
|
||||
acl Safe_ports port 80 # http
|
||||
acl Safe_ports port 21 # ftp
|
||||
acl Safe_ports port 443 # https
|
||||
acl Safe_ports port 70 # gopher
|
||||
acl Safe_ports port 210 # wais
|
||||
acl Safe_ports port 1025-65535 # unregistered ports
|
||||
acl Safe_ports port 280 # http-mgmt
|
||||
acl Safe_ports port 488 # gss-http
|
||||
acl Safe_ports port 591 # filemaker
|
||||
acl Safe_ports port 777 # multiling http
|
||||
acl CONNECT method CONNECT
|
||||
http_access deny !Safe_ports
|
||||
http_access deny CONNECT !SSL_ports
|
||||
http_access allow localhost manager
|
||||
http_access deny manager
|
||||
http_access allow localhost
|
||||
include /etc/squid/conf.d/*.conf
|
||||
http_access deny all
|
||||
|
||||
################################## Proxy Server ################################
|
||||
http_port ${HTTP_PORT}
|
||||
coredump_dir ${COREDUMP_DIR}
|
||||
refresh_pattern ^ftp: 1440 20% 10080
|
||||
refresh_pattern ^gopher: 1440 0% 1440
|
||||
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
|
||||
refresh_pattern \/(Packages|Sources)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
|
||||
refresh_pattern \/Release(|\.gpg)$ 0 0% 0 refresh-ims
|
||||
refresh_pattern \/InRelease$ 0 0% 0 refresh-ims
|
||||
refresh_pattern \/(Translation-.*)(|\.bz2|\.gz|\.xz)$ 0 0% 0 refresh-ims
|
||||
refresh_pattern . 0 20% 4320
|
||||
|
||||
|
||||
# cache_dir ufs /var/spool/squid 100 16 256
|
||||
# upstream proxy, set to your own upstream proxy IP to avoid SSRF attacks
|
||||
# cache_peer 172.1.1.1 parent 3128 0 no-query no-digest no-netdb-exchange default
|
||||
|
||||
################################## Reverse Proxy To Sandbox ################################
|
||||
http_port ${REVERSE_PROXY_PORT} accel vhost
|
||||
cache_peer ${SANDBOX_HOST} parent ${SANDBOX_PORT} 0 no-query originserver
|
||||
acl src_all src all
|
||||
http_access allow src_all
|
||||
13
apps/dify/1.1.1/conf/startupscripts/init.sh
Normal file
@@ -0,0 +1,13 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
DB_INITIALIZED="/opt/oracle/oradata/dbinit"
|
||||
#[ -f ${DB_INITIALIZED} ] && exit
|
||||
#touch ${DB_INITIALIZED}
|
||||
if [ -f ${DB_INITIALIZED} ]; then
|
||||
echo 'File exists. Standards for have been Init'
|
||||
exit
|
||||
else
|
||||
echo 'File does not exist. Standards for first time Start up this DB'
|
||||
"$ORACLE_HOME"/bin/sqlplus -s "/ as sysdba" @"/opt/oracle/scripts/startup/init_user.script";
|
||||
touch ${DB_INITIALIZED}
|
||||
fi
|
||||
10
apps/dify/1.1.1/conf/startupscripts/init_user.script
Normal file
@@ -0,0 +1,10 @@
|
||||
-- Bootstrap script for the Dify application user inside the FREEPDB1 PDB.
-- Executed once at first container start by init.sh via sqlplus as SYSDBA.

-- List available pluggable databases (diagnostic output only).
SHOW PDBS;

-- Raise the process limit; SCOPE=SPFILE means it takes effect on restart.
ALTER SYSTEM SET PROCESSES=500 SCOPE=SPFILE;

-- Switch into the application PDB before creating the user.
ALTER SESSION SET CONTAINER = freepdb1;

-- Application account with unlimited quota on the USERS tablespace.
CREATE USER dify IDENTIFIED BY dify DEFAULT TABLESPACE users QUOTA UNLIMITED ON users;
GRANT DB_DEVELOPER_ROLE TO dify;

-- Oracle Text lexer preference used for Chinese full-text indexing.
BEGIN
  CTX_DDL.CREATE_PREFERENCE('my_chinese_vgram_lexer','CHINESE_VGRAM_LEXER');
END;
/
|
||||
4
apps/dify/1.1.1/conf/tidb/config/pd.toml
Normal file
@@ -0,0 +1,4 @@
|
||||
# PD Configuration File reference:
|
||||
# https://docs.pingcap.com/tidb/stable/pd-configuration-file#pd-configuration-file
|
||||
[replication]
|
||||
max-replicas = 1
|
||||
13
apps/dify/1.1.1/conf/tidb/config/tiflash-learner.toml
Normal file
@@ -0,0 +1,13 @@
|
||||
# TiFlash tiflash-learner.toml Configuration File reference:
|
||||
# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflash-learnertoml-file
|
||||
|
||||
log-file = "/logs/tiflash_tikv.log"
|
||||
|
||||
[server]
|
||||
engine-addr = "tiflash:4030"
|
||||
addr = "0.0.0.0:20280"
|
||||
advertise-addr = "tiflash:20280"
|
||||
status-addr = "tiflash:20292"
|
||||
|
||||
[storage]
|
||||
data-dir = "/data/flash"
|
||||
19
apps/dify/1.1.1/conf/tidb/config/tiflash.toml
Normal file
@@ -0,0 +1,19 @@
|
||||
# TiFlash tiflash.toml Configuration File reference:
|
||||
# https://docs.pingcap.com/tidb/stable/tiflash-configuration#configure-the-tiflashtoml-file
|
||||
|
||||
listen_host = "0.0.0.0"
|
||||
path = "/data"
|
||||
|
||||
[flash]
|
||||
tidb_status_addr = "tidb:10080"
|
||||
service_addr = "tiflash:4030"
|
||||
|
||||
[flash.proxy]
|
||||
config = "/tiflash-learner.toml"
|
||||
|
||||
[logger]
|
||||
errorlog = "/logs/tiflash_error.log"
|
||||
log = "/logs/tiflash.log"
|
||||
|
||||
[raft]
|
||||
pd_addr = "pd0:2379"
|
||||
62
apps/dify/1.1.1/conf/tidb/docker-compose.yaml
Normal file
@@ -0,0 +1,62 @@
|
||||
services:
|
||||
pd0:
|
||||
image: pingcap/pd:v8.5.1
|
||||
# ports:
|
||||
# - "2379"
|
||||
volumes:
|
||||
- ./config/pd.toml:/pd.toml:ro
|
||||
- ./volumes/data:/data
|
||||
- ./volumes/logs:/logs
|
||||
command:
|
||||
- --name=pd0
|
||||
- --client-urls=http://0.0.0.0:2379
|
||||
- --peer-urls=http://0.0.0.0:2380
|
||||
- --advertise-client-urls=http://pd0:2379
|
||||
- --advertise-peer-urls=http://pd0:2380
|
||||
- --initial-cluster=pd0=http://pd0:2380
|
||||
- --data-dir=/data/pd
|
||||
- --config=/pd.toml
|
||||
- --log-file=/logs/pd.log
|
||||
restart: on-failure
|
||||
tikv:
|
||||
image: pingcap/tikv:v8.5.1
|
||||
volumes:
|
||||
- ./volumes/data:/data
|
||||
- ./volumes/logs:/logs
|
||||
command:
|
||||
- --addr=0.0.0.0:20160
|
||||
- --advertise-addr=tikv:20160
|
||||
- --status-addr=tikv:20180
|
||||
- --data-dir=/data/tikv
|
||||
- --pd=pd0:2379
|
||||
- --log-file=/logs/tikv.log
|
||||
depends_on:
|
||||
- "pd0"
|
||||
restart: on-failure
|
||||
tidb:
|
||||
image: pingcap/tidb:v8.5.1
|
||||
# ports:
|
||||
# - "4000:4000"
|
||||
volumes:
|
||||
- ./volumes/logs:/logs
|
||||
command:
|
||||
- --advertise-address=tidb
|
||||
- --store=tikv
|
||||
- --path=pd0:2379
|
||||
- --log-file=/logs/tidb.log
|
||||
depends_on:
|
||||
- "tikv"
|
||||
restart: on-failure
|
||||
tiflash:
|
||||
image: pingcap/tiflash:v8.5.1
|
||||
volumes:
|
||||
- ./config/tiflash.toml:/tiflash.toml:ro
|
||||
- ./config/tiflash-learner.toml:/tiflash-learner.toml:ro
|
||||
- ./volumes/data:/data
|
||||
- ./volumes/logs:/logs
|
||||
command:
|
||||
- --config=/tiflash.toml
|
||||
depends_on:
|
||||
- "tikv"
|
||||
- "tidb"
|
||||
restart: on-failure
|
||||
@@ -0,0 +1,17 @@
|
||||
<clickhouse>
|
||||
<users>
|
||||
<default>
|
||||
<password></password>
|
||||
<networks>
|
||||
<ip>::1</ip> <!-- change to ::/0 to allow access from all addresses -->
|
||||
<ip>127.0.0.1</ip>
|
||||
<ip>10.0.0.0/8</ip>
|
||||
<ip>172.16.0.0/12</ip>
|
||||
<ip>192.168.0.0/16</ip>
|
||||
</networks>
|
||||
<profile>default</profile>
|
||||
<quota>default</quota>
|
||||
<access_management>1</access_management>
|
||||
</default>
|
||||
</users>
|
||||
</clickhouse>
|
||||
@@ -0,0 +1 @@
|
||||
ALTER SYSTEM SET ob_vector_memory_limit_percentage = 30;
|
||||
@@ -0,0 +1,222 @@
|
||||
---
|
||||
# Copyright OpenSearch Contributors
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Description:
|
||||
# Default configuration for OpenSearch Dashboards
|
||||
|
||||
# OpenSearch Dashboards is served by a back end server. This setting specifies the port to use.
|
||||
# server.port: 5601
|
||||
|
||||
# Specifies the address to which the OpenSearch Dashboards server will bind. IP addresses and host names are both valid values.
|
||||
# The default is 'localhost', which usually means remote machines will not be able to connect.
|
||||
# To allow connections from remote users, set this parameter to a non-loopback address.
|
||||
# server.host: "localhost"
|
||||
|
||||
# Enables you to specify a path to mount OpenSearch Dashboards at if you are running behind a proxy.
|
||||
# Use the `server.rewriteBasePath` setting to tell OpenSearch Dashboards if it should remove the basePath
|
||||
# from requests it receives, and to prevent a deprecation warning at startup.
|
||||
# This setting cannot end in a slash.
|
||||
# server.basePath: ""
|
||||
|
||||
# Specifies whether OpenSearch Dashboards should rewrite requests that are prefixed with
|
||||
# `server.basePath` or require that they are rewritten by your reverse proxy.
|
||||
# server.rewriteBasePath: false
|
||||
|
||||
# The maximum payload size in bytes for incoming server requests.
|
||||
# server.maxPayloadBytes: 1048576
|
||||
|
||||
# The OpenSearch Dashboards server's name. This is used for display purposes.
|
||||
# server.name: "your-hostname"
|
||||
|
||||
# The URLs of the OpenSearch instances to use for all your queries.
|
||||
# opensearch.hosts: ["http://localhost:9200"]
|
||||
|
||||
# OpenSearch Dashboards uses an index in OpenSearch to store saved searches, visualizations and
|
||||
# dashboards. OpenSearch Dashboards creates a new index if the index doesn't already exist.
|
||||
# opensearchDashboards.index: ".opensearch_dashboards"
|
||||
|
||||
# The default application to load.
|
||||
# opensearchDashboards.defaultAppId: "home"
|
||||
|
||||
# Setting for an optimized healthcheck that only uses the local OpenSearch node to do Dashboards healthcheck.
|
||||
# This settings should be used for large clusters or for clusters with ingest heavy nodes.
|
||||
# It allows Dashboards to only healthcheck using the local OpenSearch node rather than fan out requests across all nodes.
|
||||
#
|
||||
# It requires the user to create an OpenSearch node attribute with the same name as the value used in the setting
|
||||
# This node attribute should assign all nodes of the same cluster an integer value that increments with each new cluster that is spun up
|
||||
# e.g. in opensearch.yml file you would set the value to a setting using node.attr.cluster_id:
|
||||
# Should only be enabled if there is a corresponding node attribute created in your OpenSearch config that matches the value here
|
||||
# opensearch.optimizedHealthcheckId: "cluster_id"
|
||||
|
||||
# If your OpenSearch is protected with basic authentication, these settings provide
|
||||
# the username and password that the OpenSearch Dashboards server uses to perform maintenance on the OpenSearch Dashboards
|
||||
# index at startup. Your OpenSearch Dashboards users still need to authenticate with OpenSearch, which
|
||||
# is proxied through the OpenSearch Dashboards server.
|
||||
# opensearch.username: "opensearch_dashboards_system"
|
||||
# opensearch.password: "pass"
|
||||
|
||||
# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively.
|
||||
# These settings enable SSL for outgoing requests from the OpenSearch Dashboards server to the browser.
|
||||
# server.ssl.enabled: false
|
||||
# server.ssl.certificate: /path/to/your/server.crt
|
||||
# server.ssl.key: /path/to/your/server.key
|
||||
|
||||
# Optional settings that provide the paths to the PEM-format SSL certificate and key files.
|
||||
# These files are used to verify the identity of OpenSearch Dashboards to OpenSearch and are required when
|
||||
# xpack.security.http.ssl.client_authentication in OpenSearch is set to required.
|
||||
# opensearch.ssl.certificate: /path/to/your/client.crt
|
||||
# opensearch.ssl.key: /path/to/your/client.key
|
||||
|
||||
# Optional setting that enables you to specify a path to the PEM file for the certificate
|
||||
# authority for your OpenSearch instance.
|
||||
# opensearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ]
|
||||
|
||||
# To disregard the validity of SSL certificates, change this setting's value to 'none'.
|
||||
# opensearch.ssl.verificationMode: full
|
||||
|
||||
# Time in milliseconds to wait for OpenSearch to respond to pings. Defaults to the value of
|
||||
# the opensearch.requestTimeout setting.
|
||||
# opensearch.pingTimeout: 1500
|
||||
|
||||
# Time in milliseconds to wait for responses from the back end or OpenSearch. This value
|
||||
# must be a positive integer.
|
||||
# opensearch.requestTimeout: 30000
|
||||
|
||||
# List of OpenSearch Dashboards client-side headers to send to OpenSearch. To send *no* client-side
|
||||
# headers, set this value to [] (an empty list).
|
||||
# opensearch.requestHeadersWhitelist: [ authorization ]
|
||||
|
||||
# Header names and values that are sent to OpenSearch. Any custom headers cannot be overwritten
|
||||
# by client-side headers, regardless of the opensearch.requestHeadersWhitelist configuration.
|
||||
# opensearch.customHeaders: {}
|
||||
|
||||
# Time in milliseconds for OpenSearch to wait for responses from shards. Set to 0 to disable.
|
||||
# opensearch.shardTimeout: 30000
|
||||
|
||||
# Logs queries sent to OpenSearch. Requires logging.verbose set to true.
|
||||
# opensearch.logQueries: false
|
||||
|
||||
# Specifies the path where OpenSearch Dashboards creates the process ID file.
|
||||
# pid.file: /var/run/opensearchDashboards.pid
|
||||
|
||||
# Enables you to specify a file where OpenSearch Dashboards stores log output.
|
||||
# logging.dest: stdout
|
||||
|
||||
# Set the value of this setting to true to suppress all logging output.
|
||||
# logging.silent: false
|
||||
|
||||
# Set the value of this setting to true to suppress all logging output other than error messages.
|
||||
# logging.quiet: false
|
||||
|
||||
# Set the value of this setting to true to log all events, including system usage information
|
||||
# and all requests.
|
||||
# logging.verbose: false
|
||||
|
||||
# Set the interval in milliseconds to sample system and process performance
|
||||
# metrics. Minimum is 100ms. Defaults to 5000.
|
||||
# ops.interval: 5000
|
||||
|
||||
# Specifies locale to be used for all localizable strings, dates and number formats.
|
||||
# Supported languages are the following: English - en , by default , Chinese - zh-CN .
|
||||
# i18n.locale: "en"
|
||||
|
||||
# Set the allowlist to check input graphite Url. Allowlist is the default check list.
|
||||
# vis_type_timeline.graphiteAllowedUrls: ['https://www.hostedgraphite.com/UID/ACCESS_KEY/graphite']
|
||||
|
||||
# Set the blocklist to check input graphite Url. Blocklist is an IP list.
|
||||
# Below is an example for reference
|
||||
# vis_type_timeline.graphiteBlockedIPs: [
|
||||
# //Loopback
|
||||
# '127.0.0.0/8',
|
||||
# '::1/128',
|
||||
# //Link-local Address for IPv6
|
||||
# 'fe80::/10',
|
||||
# //Private IP address for IPv4
|
||||
# '10.0.0.0/8',
|
||||
# '172.16.0.0/12',
|
||||
# '192.168.0.0/16',
|
||||
# //Unique local address (ULA)
|
||||
# 'fc00::/7',
|
||||
# //Reserved IP address
|
||||
# '0.0.0.0/8',
|
||||
# '100.64.0.0/10',
|
||||
# '192.0.0.0/24',
|
||||
# '192.0.2.0/24',
|
||||
# '198.18.0.0/15',
|
||||
# '192.88.99.0/24',
|
||||
# '198.51.100.0/24',
|
||||
# '203.0.113.0/24',
|
||||
# '224.0.0.0/4',
|
||||
# '240.0.0.0/4',
|
||||
# '255.255.255.255/32',
|
||||
# '::/128',
|
||||
# '2001:db8::/32',
|
||||
# 'ff00::/8',
|
||||
# ]
|
||||
# vis_type_timeline.graphiteBlockedIPs: []
|
||||
|
||||
# opensearchDashboards.branding:
|
||||
# logo:
|
||||
# defaultUrl: ""
|
||||
# darkModeUrl: ""
|
||||
# mark:
|
||||
# defaultUrl: ""
|
||||
# darkModeUrl: ""
|
||||
# loadingLogo:
|
||||
# defaultUrl: ""
|
||||
# darkModeUrl: ""
|
||||
# faviconUrl: ""
|
||||
# applicationTitle: ""
|
||||
|
||||
# Set the value of this setting to true to capture region blocked warnings and errors
|
||||
# for your map rendering services.
|
||||
# map.showRegionBlockedWarning: false
|
||||
|
||||
# Set the value of this setting to false to suppress search usage telemetry
|
||||
# for reducing the load of OpenSearch cluster.
|
||||
# data.search.usageTelemetry.enabled: false
|
||||
|
||||
# 2.4 renames 'wizard.enabled: false' to 'vis_builder.enabled: false'
|
||||
# Set the value of this setting to false to disable VisBuilder
|
||||
# functionality in Visualization.
|
||||
# vis_builder.enabled: false
|
||||
|
||||
# 2.4 New Experimental Feature
|
||||
# Set the value of this setting to true to enable the experimental multiple data source
|
||||
# support feature. Use with caution.
|
||||
# data_source.enabled: false
|
||||
# Set the value of these settings to customize crypto materials to encryption saved credentials
|
||||
# in data sources.
|
||||
# data_source.encryption.wrappingKeyName: 'changeme'
|
||||
# data_source.encryption.wrappingKeyNamespace: 'changeme'
|
||||
# data_source.encryption.wrappingKey: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
|
||||
|
||||
# 2.6 New ML Commons Dashboards Feature
|
||||
# Set the value of this setting to true to enable the ml commons dashboards
|
||||
# ml_commons_dashboards.enabled: false
|
||||
|
||||
# 2.12 New experimental Assistant Dashboards Feature
|
||||
# Set the value of this setting to true to enable the assistant dashboards
|
||||
# assistant.chat.enabled: false
|
||||
|
||||
# 2.13 New Query Assistant Feature
|
||||
# Set the value of this setting to false to disable the query assistant
|
||||
# observability.query_assist.enabled: false
|
||||
|
||||
# 2.14 Enable Ui Metric Collectors in Usage Collector
|
||||
# Set the value of this setting to true to enable UI Metric collections
|
||||
# usageCollection.uiMetric.enabled: false
|
||||
|
||||
opensearch.hosts: ["https://localhost:9200"]
|
||||
opensearch.ssl.verificationMode: none
|
||||
opensearch.username: admin
|
||||
opensearch.password: 'Qazwsxedc!@#123'
|
||||
opensearch.requestHeadersWhitelist: [authorization, securitytenant]
|
||||
|
||||
opensearch_security.multitenancy.enabled: true
|
||||
opensearch_security.multitenancy.tenants.preferred: [Private, Global]
|
||||
opensearch_security.readonly_mode.roles: [kibana_read_only]
|
||||
# Use this setting if you are running opensearch-dashboards without https
|
||||
opensearch_security.cookie.secure: false
|
||||
server.host: '0.0.0.0'
|
||||
14
apps/dify/1.1.1/conf/volumes/sandbox/conf/config.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
app:
|
||||
port: 8194
|
||||
  debug: true
|
||||
key: dify-sandbox
|
||||
max_workers: 4
|
||||
max_requests: 50
|
||||
worker_timeout: 5
|
||||
python_path: /usr/local/bin/python3
|
||||
enable_network: true  # please make sure there is no network risk in your environment
|
||||
allowed_syscalls: # please leave it empty if you have no idea how seccomp works
|
||||
proxy:
|
||||
socks5: ''
|
||||
http: ''
|
||||
https: ''
|
||||
@@ -0,0 +1,35 @@
|
||||
app:
|
||||
port: 8194
|
||||
  debug: true
|
||||
key: dify-sandbox
|
||||
max_workers: 4
|
||||
max_requests: 50
|
||||
worker_timeout: 5
|
||||
python_path: /usr/local/bin/python3
|
||||
python_lib_path:
|
||||
- /usr/local/lib/python3.10
|
||||
- /usr/lib/python3.10
|
||||
- /usr/lib/python3
|
||||
- /usr/lib/x86_64-linux-gnu
|
||||
- /etc/ssl/certs/ca-certificates.crt
|
||||
- /etc/nsswitch.conf
|
||||
- /etc/hosts
|
||||
- /etc/resolv.conf
|
||||
- /run/systemd/resolve/stub-resolv.conf
|
||||
- /run/resolvconf/resolv.conf
|
||||
- /etc/localtime
|
||||
- /usr/share/zoneinfo
|
||||
- /etc/timezone
|
||||
# add more paths if needed
|
||||
python_pip_mirror_url: https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
nodejs_path: /usr/local/bin/node
|
||||
enable_network: true
|
||||
allowed_syscalls:
|
||||
- 1
|
||||
- 2
|
||||
- 3
|
||||
# add all the syscalls which you require
|
||||
proxy:
|
||||
socks5: ''
|
||||
http: ''
|
||||
https: ''
|
||||
82
apps/dify/1.1.1/data.yml
Normal file
@@ -0,0 +1,82 @@
|
||||
additionalProperties:
|
||||
formFields:
|
||||
- default: "/home/dify"
|
||||
edit: true
|
||||
envKey: DIFY_ROOT_PATH
|
||||
labelZh: 数据持久化路径
|
||||
labelEn: Data persistence path
|
||||
required: true
|
||||
type: text
|
||||
- default: 8080
|
||||
edit: true
|
||||
envKey: PANEL_APP_PORT_HTTP
|
||||
labelZh: 网站端口
|
||||
labelEn: WebUI port
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
- default: 8443
|
||||
edit: true
|
||||
envKey: PANEL_APP_PORT_HTTPS
|
||||
labelZh: HTTPS 端口
|
||||
labelEn: HTTPS port
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
- default: 5432
|
||||
edit: true
|
||||
envKey: EXPOSE_DB_PORT
|
||||
labelZh: 数据库端口
|
||||
labelEn: Database port
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
- default: 5003
|
||||
edit: true
|
||||
envKey: EXPOSE_PLUGIN_DEBUGGING_PORT
|
||||
labelZh: 插件调试端口
|
||||
labelEn: Plugin debugging port
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
- default: 19530
|
||||
disabled: true
|
||||
edit: true
|
||||
envKey: MILVUS_STANDALONE_API_PORT
|
||||
labelZh: Milvus 接口端口
|
||||
labelEn: Milvus API port
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
- default: 9091
|
||||
disabled: true
|
||||
envKey: MILVUS_STANDALONE_SERVER_PORT
|
||||
labelZh: Milvus 服务端口
|
||||
labelEn: Milvus server port
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
- default: 8123
|
||||
edit: true
|
||||
envKey: MYSCALE_PORT
|
||||
labelZh: MyScale 端口
|
||||
labelEn: MyScale port
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
- default: 9200
|
||||
edit: true
|
||||
envKey: ELASTICSEARCH_PORT
|
||||
labelZh: Elasticsearch 端口
|
||||
labelEn: Elasticsearch port
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
- default: 5601
|
||||
edit: true
|
||||
envKey: KIBANA_PORT
|
||||
labelZh: Kibana 端口
|
||||
labelEn: Kibana port
|
||||
required: true
|
||||
rule: paramPort
|
||||
type: number
|
||||
970
apps/dify/1.1.1/docker-compose.yml
Normal file
@@ -0,0 +1,970 @@
|
||||
# ==================================================================
|
||||
# WARNING: This file is auto-generated by generate_docker_compose
|
||||
# Do not modify this file directly. Instead, update the .env.example
|
||||
# or docker-compose-template.yaml and regenerate this file.
|
||||
# ==================================================================
|
||||
|
||||
x-shared-env: &shared-api-worker-env
|
||||
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
|
||||
CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-}
|
||||
SERVICE_API_URL: ${SERVICE_API_URL:-}
|
||||
APP_API_URL: ${APP_API_URL:-}
|
||||
APP_WEB_URL: ${APP_WEB_URL:-}
|
||||
FILES_URL: ${FILES_URL:-}
|
||||
LOG_LEVEL: ${LOG_LEVEL:-INFO}
|
||||
LOG_FILE: ${LOG_FILE:-/app/logs/server.log}
|
||||
LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20}
|
||||
LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5}
|
||||
LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S}
|
||||
LOG_TZ: ${LOG_TZ:-UTC}
|
||||
DEBUG: ${DEBUG:-false}
|
||||
FLASK_DEBUG: ${FLASK_DEBUG:-false}
|
||||
SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U}
|
||||
INIT_PASSWORD: ${INIT_PASSWORD:-}
|
||||
DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION}
|
||||
CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai}
|
||||
OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1}
|
||||
MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true}
|
||||
FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300}
|
||||
ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60}
|
||||
REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30}
|
||||
APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0}
|
||||
APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200}
|
||||
DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0}
|
||||
DIFY_PORT: ${DIFY_PORT:-5001}
|
||||
SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1}
|
||||
SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-gevent}
|
||||
SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10}
|
||||
CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-}
|
||||
GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360}
|
||||
CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-}
|
||||
CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false}
|
||||
CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-}
|
||||
CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-}
|
||||
API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10}
|
||||
API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60}
|
||||
DB_USERNAME: ${DB_USERNAME:-postgres}
|
||||
DB_PASSWORD: ${DB_PASSWORD:-difyai123456}
|
||||
DB_HOST: ${DB_HOST:-db}
|
||||
DB_PORT: ${DB_PORT:-5432}
|
||||
DB_DATABASE: ${DB_DATABASE:-dify}
|
||||
SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30}
|
||||
SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600}
|
||||
SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false}
|
||||
POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100}
|
||||
POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB}
|
||||
POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB}
|
||||
POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}
|
||||
POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}
|
||||
REDIS_HOST: ${REDIS_HOST:-redis}
|
||||
REDIS_PORT: ${REDIS_PORT:-6379}
|
||||
REDIS_USERNAME: ${REDIS_USERNAME:-}
|
||||
REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456}
|
||||
REDIS_USE_SSL: ${REDIS_USE_SSL:-false}
|
||||
REDIS_DB: ${REDIS_DB:-0}
|
||||
REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false}
|
||||
REDIS_SENTINELS: ${REDIS_SENTINELS:-}
|
||||
REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-}
|
||||
REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-}
|
||||
REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-}
|
||||
REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1}
|
||||
REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false}
|
||||
REDIS_CLUSTERS: ${REDIS_CLUSTERS:-}
|
||||
REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-}
|
||||
CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1}
|
||||
BROKER_USE_SSL: ${BROKER_USE_SSL:-false}
|
||||
CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false}
|
||||
CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-}
|
||||
CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1}
|
||||
WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*}
|
||||
CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}
|
||||
STORAGE_TYPE: ${STORAGE_TYPE:-opendal}
|
||||
OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs}
|
||||
OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage}
|
||||
S3_ENDPOINT: ${S3_ENDPOINT:-}
|
||||
S3_REGION: ${S3_REGION:-us-east-1}
|
||||
S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai}
|
||||
S3_ACCESS_KEY: ${S3_ACCESS_KEY:-}
|
||||
S3_SECRET_KEY: ${S3_SECRET_KEY:-}
|
||||
S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false}
|
||||
AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai}
|
||||
AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai}
|
||||
AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container}
|
||||
AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-https://<your_account_name>.blob.core.windows.net}
|
||||
GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name}
|
||||
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-}
|
||||
ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name}
|
||||
ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key}
|
||||
ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key}
|
||||
ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-https://oss-ap-southeast-1-internal.aliyuncs.com}
|
||||
ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1}
|
||||
ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4}
|
||||
ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path}
|
||||
TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name}
|
||||
TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key}
|
||||
TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id}
|
||||
TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region}
|
||||
TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme}
|
||||
OCI_ENDPOINT: ${OCI_ENDPOINT:-https://objectstorage.us-ashburn-1.oraclecloud.com}
|
||||
OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name}
|
||||
OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key}
|
||||
OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key}
|
||||
OCI_REGION: ${OCI_REGION:-us-ashburn-1}
|
||||
HUAWEI_OBS_BUCKET_NAME: ${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name}
|
||||
HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key}
|
||||
HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key}
|
||||
HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url}
|
||||
VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name}
|
||||
VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key}
|
||||
VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key}
|
||||
VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url}
|
||||
VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region}
|
||||
BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name}
|
||||
BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key}
|
||||
BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key}
|
||||
BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url}
|
||||
SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name}
|
||||
SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key}
|
||||
SUPABASE_URL: ${SUPABASE_URL:-your-server-url}
|
||||
VECTOR_STORE: ${VECTOR_STORE:-weaviate}
|
||||
WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080}
|
||||
WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
|
||||
QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333}
|
||||
QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
|
||||
QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20}
|
||||
QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false}
|
||||
QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334}
|
||||
MILVUS_URI: ${MILVUS_URI:-http://127.0.0.1:19530}
|
||||
MILVUS_TOKEN: ${MILVUS_TOKEN:-}
|
||||
MILVUS_USER: ${MILVUS_USER:-root}
|
||||
MILVUS_PASSWORD: ${MILVUS_PASSWORD:-Milvus}
|
||||
MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False}
|
||||
MYSCALE_HOST: ${MYSCALE_HOST:-myscale}
|
||||
MYSCALE_PORT: ${MYSCALE_PORT:-8123}
|
||||
MYSCALE_USER: ${MYSCALE_USER:-default}
|
||||
MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-}
|
||||
MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify}
|
||||
MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-}
|
||||
COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-couchbase://couchbase-server}
|
||||
COUCHBASE_USER: ${COUCHBASE_USER:-Administrator}
|
||||
COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password}
|
||||
COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings}
|
||||
COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default}
|
||||
PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector}
|
||||
PGVECTOR_PORT: ${PGVECTOR_PORT:-5432}
|
||||
PGVECTOR_USER: ${PGVECTOR_USER:-postgres}
|
||||
PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456}
|
||||
PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify}
|
||||
PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1}
|
||||
PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5}
|
||||
PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs}
|
||||
PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432}
|
||||
PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres}
|
||||
PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456}
|
||||
PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify}
|
||||
ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak}
|
||||
ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk}
|
||||
ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou}
|
||||
ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456}
|
||||
ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount}
|
||||
ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword}
|
||||
ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify}
|
||||
ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword}
|
||||
ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com}
|
||||
ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432}
|
||||
ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1}
|
||||
ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5}
|
||||
TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb}
|
||||
TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000}
|
||||
TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-}
|
||||
TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-}
|
||||
TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify}
|
||||
TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1}
|
||||
TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify}
|
||||
TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20}
|
||||
TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false}
|
||||
TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334}
|
||||
TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify}
|
||||
TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify}
|
||||
TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1}
|
||||
TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1}
|
||||
TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1}
|
||||
TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify}
|
||||
TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100}
|
||||
CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1}
|
||||
CHROMA_PORT: ${CHROMA_PORT:-8000}
|
||||
CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant}
|
||||
CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database}
|
||||
CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider}
|
||||
CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-}
|
||||
ORACLE_HOST: ${ORACLE_HOST:-oracle}
|
||||
ORACLE_PORT: ${ORACLE_PORT:-1521}
|
||||
ORACLE_USER: ${ORACLE_USER:-dify}
|
||||
ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify}
|
||||
ORACLE_DATABASE: ${ORACLE_DATABASE:-FREEPDB1}
|
||||
RELYT_HOST: ${RELYT_HOST:-db}
|
||||
RELYT_PORT: ${RELYT_PORT:-5432}
|
||||
RELYT_USER: ${RELYT_USER:-postgres}
|
||||
RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456}
|
||||
RELYT_DATABASE: ${RELYT_DATABASE:-postgres}
|
||||
OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch}
|
||||
OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200}
|
||||
OPENSEARCH_USER: ${OPENSEARCH_USER:-admin}
|
||||
OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin}
|
||||
OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true}
|
||||
TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1}
|
||||
TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify}
|
||||
TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30}
|
||||
TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify}
|
||||
TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify}
|
||||
TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1}
|
||||
TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2}
|
||||
ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0}
|
||||
ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200}
|
||||
ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic}
|
||||
ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
|
||||
KIBANA_PORT: ${KIBANA_PORT:-5601}
|
||||
BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287}
|
||||
BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000}
|
||||
BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root}
|
||||
BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify}
|
||||
BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify}
|
||||
BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1}
|
||||
BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3}
|
||||
VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak}
|
||||
VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk}
|
||||
VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai}
|
||||
VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com}
|
||||
VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http}
|
||||
VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30}
|
||||
VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30}
|
||||
LINDORM_URL: ${LINDORM_URL:-http://lindorm:30070}
|
||||
LINDORM_USERNAME: ${LINDORM_USERNAME:-lindorm}
|
||||
LINDORM_PASSWORD: ${LINDORM_PASSWORD:-lindorm}
|
||||
OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase}
|
||||
OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881}
|
||||
OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test}
|
||||
OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
|
||||
OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test}
|
||||
OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
|
||||
OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
|
||||
UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io}
|
||||
UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify}
|
||||
UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15}
|
||||
UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5}
|
||||
ETL_TYPE: ${ETL_TYPE:-dify}
|
||||
UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-}
|
||||
UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-}
|
||||
SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true}
|
||||
PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512}
|
||||
CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024}
|
||||
MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64}
|
||||
UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10}
|
||||
UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100}
|
||||
UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50}
|
||||
SENTRY_DSN: ${SENTRY_DSN:-}
|
||||
API_SENTRY_DSN: ${API_SENTRY_DSN:-}
|
||||
API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
|
||||
API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
|
||||
WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-}
|
||||
NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public}
|
||||
NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-}
|
||||
NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-}
|
||||
NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-}
|
||||
MAIL_TYPE: ${MAIL_TYPE:-resend}
|
||||
MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-}
|
||||
RESEND_API_URL: ${RESEND_API_URL:-https://api.resend.com}
|
||||
RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key}
|
||||
SMTP_SERVER: ${SMTP_SERVER:-}
|
||||
SMTP_PORT: ${SMTP_PORT:-465}
|
||||
SMTP_USERNAME: ${SMTP_USERNAME:-}
|
||||
SMTP_PASSWORD: ${SMTP_PASSWORD:-}
|
||||
SMTP_USE_TLS: ${SMTP_USE_TLS:-true}
|
||||
SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false}
|
||||
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000}
|
||||
INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72}
|
||||
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5}
|
||||
CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194}
|
||||
CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox}
|
||||
CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807}
|
||||
CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808}
|
||||
CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5}
|
||||
CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20}
|
||||
CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-80000}
|
||||
CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30}
|
||||
CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30}
|
||||
CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000}
|
||||
CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10}
|
||||
CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60}
|
||||
CODE_EXECUTION_WRITE_TIMEOUT: ${CODE_EXECUTION_WRITE_TIMEOUT:-10}
|
||||
TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-80000}
|
||||
WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500}
|
||||
WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200}
|
||||
WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5}
|
||||
MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800}
|
||||
WORKFLOW_PARALLEL_DEPTH_LIMIT: ${WORKFLOW_PARALLEL_DEPTH_LIMIT:-3}
|
||||
WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10}
|
||||
HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
|
||||
HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
|
||||
SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128}
|
||||
SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128}
|
||||
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
|
||||
PGUSER: ${PGUSER:-${DB_USERNAME}}
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-${DB_PASSWORD}}
|
||||
POSTGRES_DB: ${POSTGRES_DB:-${DB_DATABASE}}
|
||||
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
|
||||
SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
|
||||
SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release}
|
||||
SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
|
||||
SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
|
||||
SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
|
||||
SANDBOX_HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
|
||||
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
|
||||
WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
|
||||
WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
|
||||
WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true}
|
||||
WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
|
||||
WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
|
||||
WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
|
||||
WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
|
||||
WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
|
||||
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
|
||||
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
|
||||
CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
|
||||
CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
|
||||
CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
|
||||
ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
|
||||
ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
|
||||
ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
|
||||
ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
|
||||
ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
|
||||
ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
|
||||
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
|
||||
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
|
||||
ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
|
||||
MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
|
||||
MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true}
|
||||
PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres}
|
||||
PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
|
||||
PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
|
||||
PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
|
||||
OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
|
||||
OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
|
||||
OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m}
|
||||
OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
|
||||
OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
|
||||
OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1}
|
||||
OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1}
|
||||
OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536}
|
||||
OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536}
|
||||
NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
|
||||
NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
|
||||
NGINX_PORT: ${NGINX_PORT:-80}
|
||||
NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
|
||||
NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
|
||||
NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
|
||||
NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
|
||||
NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
|
||||
NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
|
||||
NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
|
||||
NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
|
||||
NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
|
||||
NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
|
||||
CERTBOT_EMAIL: ${CERTBOT_EMAIL:-your_email@example.com}
|
||||
CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-your_domain.com}
|
||||
CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-}
|
||||
SSRF_HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
|
||||
SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
|
||||
SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
|
||||
SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
|
||||
SSRF_DEFAULT_TIME_OUT: ${SSRF_DEFAULT_TIME_OUT:-5}
|
||||
SSRF_DEFAULT_CONNECT_TIME_OUT: ${SSRF_DEFAULT_CONNECT_TIME_OUT:-5}
|
||||
SSRF_DEFAULT_READ_TIME_OUT: ${SSRF_DEFAULT_READ_TIME_OUT:-5}
|
||||
SSRF_DEFAULT_WRITE_TIME_OUT: ${SSRF_DEFAULT_WRITE_TIME_OUT:-5}
|
||||
EXPOSE_NGINX_PORT: ${PANEL_APP_PORT_HTTP:-8080}
|
||||
EXPOSE_NGINX_SSL_PORT: ${PANEL_APP_PORT_HTTPS:-8443}
|
||||
POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-}
|
||||
POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-}
|
||||
POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-}
|
||||
POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-}
|
||||
POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-}
|
||||
POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-}
|
||||
CSP_WHITELIST: ${CSP_WHITELIST:-}
|
||||
CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false}
|
||||
MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100}
|
||||
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10}
|
||||
DB_PLUGIN_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
|
||||
EXPOSE_PLUGIN_DAEMON_PORT: ${EXPOSE_PLUGIN_DAEMON_PORT:-5002}
|
||||
PLUGIN_DAEMON_PORT: ${PLUGIN_DAEMON_PORT:-5002}
|
||||
PLUGIN_DAEMON_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
|
||||
PLUGIN_DAEMON_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002}
|
||||
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
|
||||
PLUGIN_PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
|
||||
PLUGIN_DEBUGGING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0}
|
||||
PLUGIN_DEBUGGING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003}
|
||||
EXPOSE_PLUGIN_DEBUGGING_HOST: ${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost}
|
||||
EXPOSE_PLUGIN_DEBUGGING_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}
|
||||
PLUGIN_DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
|
||||
PLUGIN_DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
|
||||
ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}}
|
||||
MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true}
|
||||
MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
|
||||
FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
|
||||
|
||||
services:
|
||||
api:
|
||||
image: langgenius/dify-api:1.1.1
|
||||
container_name: api-${CONTAINER_NAME}
|
||||
restart: always
|
||||
environment:
|
||||
<<: *shared-api-worker-env
|
||||
MODE: api
|
||||
SENTRY_DSN: ${API_SENTRY_DSN:-}
|
||||
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
|
||||
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
|
||||
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
|
||||
INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
|
||||
depends_on:
|
||||
- db
|
||||
- redis
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/app/storage:/app/api/storage
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
- default
|
||||
|
||||
worker:
|
||||
image: langgenius/dify-api:1.1.1
|
||||
container_name: worker-${CONTAINER_NAME}
|
||||
restart: always
|
||||
environment:
|
||||
<<: *shared-api-worker-env
|
||||
MODE: worker
|
||||
SENTRY_DSN: ${API_SENTRY_DSN:-}
|
||||
SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0}
|
||||
SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0}
|
||||
PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
|
||||
INNER_API_KEY_FOR_PLUGIN: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
|
||||
depends_on:
|
||||
- db
|
||||
- redis
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/app/storage:/app/api/storage
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
- default
|
||||
|
||||
web:
|
||||
image: langgenius/dify-web:1.1.1
|
||||
container_name: ${CONTAINER_NAME}
|
||||
restart: always
|
||||
environment:
|
||||
CONSOLE_API_URL: ${CONSOLE_API_URL:-}
|
||||
APP_API_URL: ${APP_API_URL:-}
|
||||
SENTRY_DSN: ${WEB_SENTRY_DSN:-}
|
||||
NEXT_TELEMETRY_DISABLED: ${NEXT_TELEMETRY_DISABLED:-0}
|
||||
TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000}
|
||||
CSP_WHITELIST: ${CSP_WHITELIST:-}
|
||||
MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai}
|
||||
MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai}
|
||||
TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-}
|
||||
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-}
|
||||
|
||||
db:
|
||||
image: postgres:15-alpine
|
||||
container_name: db-${CONTAINER_NAME}
|
||||
restart: always
|
||||
environment:
|
||||
PGUSER: ${PGUSER:-postgres}
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-difyai123456}
|
||||
POSTGRES_DB: ${POSTGRES_DB:-dify}
|
||||
PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata}
|
||||
command: >
|
||||
postgres -c 'max_connections=${POSTGRES_MAX_CONNECTIONS:-100}'
|
||||
-c 'shared_buffers=${POSTGRES_SHARED_BUFFERS:-128MB}'
|
||||
-c 'work_mem=${POSTGRES_WORK_MEM:-4MB}'
|
||||
-c 'maintenance_work_mem=${POSTGRES_MAINTENANCE_WORK_MEM:-64MB}'
|
||||
-c 'effective_cache_size=${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB}'
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/db/data:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: [ 'CMD', 'pg_isready' ]
|
||||
interval: 1s
|
||||
timeout: 3s
|
||||
retries: 30
|
||||
ports:
|
||||
- '${EXPOSE_DB_PORT:-5432}:5432'
|
||||
|
||||
redis:
|
||||
image: redis:6-alpine
|
||||
container_name: redis-${CONTAINER_NAME}
|
||||
restart: always
|
||||
environment:
|
||||
REDISCLI_AUTH: ${REDIS_PASSWORD:-difyai123456}
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/redis/data:/data
|
||||
command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456}
|
||||
healthcheck:
|
||||
test: [ 'CMD', 'redis-cli', 'ping' ]
|
||||
|
||||
sandbox:
|
||||
image: langgenius/dify-sandbox:0.2.10
|
||||
container_name: sandbox-${CONTAINER_NAME}
|
||||
restart: always
|
||||
environment:
|
||||
API_KEY: ${SANDBOX_API_KEY:-dify-sandbox}
|
||||
GIN_MODE: ${SANDBOX_GIN_MODE:-release}
|
||||
WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15}
|
||||
ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true}
|
||||
HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128}
|
||||
HTTPS_PROXY: ${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128}
|
||||
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/sandbox/dependencies:/dependencies
|
||||
- ${DIFY_ROOT_PATH}/volumes/sandbox/conf:/conf
|
||||
healthcheck:
|
||||
test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ]
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
|
||||
plugin_daemon:
|
||||
image: langgenius/dify-plugin-daemon:0.0.3-local
|
||||
container_name: plugin_daemon-${CONTAINER_NAME}
|
||||
restart: always
|
||||
environment:
|
||||
<<: *shared-api-worker-env
|
||||
DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin}
|
||||
SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002}
|
||||
SERVER_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi}
|
||||
MAX_PLUGIN_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800}
|
||||
PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false}
|
||||
DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001}
|
||||
DIFY_INNER_API_KEY: ${INNER_API_KEY_FOR_PLUGIN:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1}
|
||||
PLUGIN_REMOTE_INSTALLING_HOST: ${PLUGIN_REMOTE_INSTALL_HOST:-0.0.0.0}
|
||||
PLUGIN_REMOTE_INSTALLING_PORT: ${PLUGIN_REMOTE_INSTALL_PORT:-5003}
|
||||
PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd}
|
||||
FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true}
|
||||
ports:
|
||||
- "${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003}:${PLUGIN_DEBUGGING_PORT:-5003}"
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/plugin_daemon:/app/storage
|
||||
|
||||
|
||||
ssrf_proxy:
|
||||
image: ubuntu/squid:latest
|
||||
container_name: ssrf_proxy-${CONTAINER_NAME}
|
||||
restart: always
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template
|
||||
- ${DIFY_ROOT_PATH}/ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh
|
||||
entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
|
||||
environment:
|
||||
HTTP_PORT: ${SSRF_HTTP_PORT:-3128}
|
||||
COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid}
|
||||
REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194}
|
||||
SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox}
|
||||
SANDBOX_PORT: ${SANDBOX_PORT:-8194}
|
||||
networks:
|
||||
- ssrf_proxy_network
|
||||
- default
|
||||
|
||||
certbot:
|
||||
image: certbot/certbot
|
||||
container_name: certbot-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- certbot
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/certbot/conf:/etc/letsencrypt
|
||||
- ${DIFY_ROOT_PATH}/volumes/certbot/www:/var/www/html
|
||||
- ${DIFY_ROOT_PATH}/volumes/certbot/logs:/var/log/letsencrypt
|
||||
- ${DIFY_ROOT_PATH}/volumes/certbot/conf/live:/etc/letsencrypt/live
|
||||
- ${DIFY_ROOT_PATH}/certbot/update-cert.template.txt:/update-cert.template.txt
|
||||
- ${DIFY_ROOT_PATH}/certbot/docker-entrypoint.sh:/docker-entrypoint.sh
|
||||
environment:
|
||||
- CERTBOT_EMAIL=${CERTBOT_EMAIL}
|
||||
- CERTBOT_DOMAIN=${CERTBOT_DOMAIN}
|
||||
- CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-}
|
||||
entrypoint: [ '/docker-entrypoint.sh' ]
|
||||
command: [ 'tail', '-f', '/dev/null' ]
|
||||
|
||||
nginx:
|
||||
image: nginx:latest
|
||||
container_name: nginx-${CONTAINER_NAME}
|
||||
restart: always
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/nginx/nginx.conf.template:/etc/nginx/nginx.conf.template
|
||||
- ${DIFY_ROOT_PATH}/nginx/proxy.conf.template:/etc/nginx/proxy.conf.template
|
||||
- ${DIFY_ROOT_PATH}/nginx/https.conf.template:/etc/nginx/https.conf.template
|
||||
- ${DIFY_ROOT_PATH}/nginx/conf.d:/etc/nginx/conf.d
|
||||
- ${DIFY_ROOT_PATH}/nginx/docker-entrypoint.sh:/docker-entrypoint-mount.sh
|
||||
- ${DIFY_ROOT_PATH}/nginx/ssl:/etc/ssl # cert dir (legacy)
|
||||
- ${DIFY_ROOT_PATH}/volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container)
|
||||
- ${DIFY_ROOT_PATH}/volumes/certbot/conf:/etc/letsencrypt
|
||||
- ${DIFY_ROOT_PATH}/volumes/certbot/www:/var/www/html
|
||||
entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ]
|
||||
environment:
|
||||
NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_}
|
||||
NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false}
|
||||
NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443}
|
||||
NGINX_PORT: ${NGINX_PORT:-80}
|
||||
NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt}
|
||||
NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key}
|
||||
NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.1 TLSv1.2 TLSv1.3}
|
||||
NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto}
|
||||
NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-15M}
|
||||
NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65}
|
||||
NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s}
|
||||
NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s}
|
||||
NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false}
|
||||
CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-}
|
||||
depends_on:
|
||||
- api
|
||||
- web
|
||||
ports:
|
||||
- '${PANEL_APP_PORT_HTTP:-80}:${NGINX_PORT:-80}'
|
||||
- '${PANEL_APP_PORT_HTTPS:-443}:${NGINX_SSL_PORT:-443}'
|
||||
|
||||
weaviate:
|
||||
image: semitechnologies/weaviate:1.19.0
|
||||
container_name: weaviate-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- ''
|
||||
- weaviate
|
||||
restart: always
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/weaviate:/var/lib/weaviate
|
||||
environment:
|
||||
PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate}
|
||||
QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25}
|
||||
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-false}
|
||||
DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none}
|
||||
CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1}
|
||||
AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true}
|
||||
AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih}
|
||||
AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai}
|
||||
AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true}
|
||||
AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai}
|
||||
|
||||
qdrant:
|
||||
image: langgenius/qdrant:v1.7.3
|
||||
container_name: qdrant-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- qdrant
|
||||
restart: always
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/qdrant:/qdrant/storage
|
||||
environment:
|
||||
QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456}
|
||||
|
||||
couchbase-server:
|
||||
build: ./conf/couchbase-server
|
||||
profiles:
|
||||
- couchbase
|
||||
restart: always
|
||||
environment:
|
||||
- CLUSTER_NAME=dify_search
|
||||
- COUCHBASE_ADMINISTRATOR_USERNAME=${COUCHBASE_USER:-Administrator}
|
||||
- COUCHBASE_ADMINISTRATOR_PASSWORD=${COUCHBASE_PASSWORD:-password}
|
||||
- COUCHBASE_BUCKET=${COUCHBASE_BUCKET_NAME:-Embeddings}
|
||||
- COUCHBASE_BUCKET_RAMSIZE=512
|
||||
- COUCHBASE_RAM_SIZE=2048
|
||||
- COUCHBASE_EVENTING_RAM_SIZE=512
|
||||
- COUCHBASE_INDEX_RAM_SIZE=512
|
||||
- COUCHBASE_FTS_RAM_SIZE=1024
|
||||
hostname: couchbase-server
|
||||
container_name: couchbase-server
|
||||
working_dir: /opt/couchbase
|
||||
stdin_open: true
|
||||
tty: true
|
||||
entrypoint: [ "" ]
|
||||
command: sh -c "/opt/couchbase/init/init-cbserver.sh"
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data
|
||||
healthcheck:
|
||||
test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ]
|
||||
interval: 10s
|
||||
retries: 10
|
||||
start_period: 30s
|
||||
timeout: 10s
|
||||
|
||||
pgvector:
|
||||
image: pgvector/pgvector:pg16
|
||||
container_name: pgvector-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- pgvector
|
||||
restart: always
|
||||
environment:
|
||||
PGUSER: ${PGVECTOR_PGUSER:-postgres}
|
||||
POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
|
||||
POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
|
||||
PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/pgvector/data:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: [ 'CMD', 'pg_isready' ]
|
||||
interval: 1s
|
||||
timeout: 3s
|
||||
retries: 30
|
||||
|
||||
pgvecto-rs:
|
||||
image: tensorchord/pgvecto-rs:pg16-v0.3.0
|
||||
container_name: pgvecto-rs-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- pgvecto-rs
|
||||
restart: always
|
||||
environment:
|
||||
PGUSER: ${PGVECTOR_PGUSER:-postgres}
|
||||
POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456}
|
||||
POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify}
|
||||
PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata}
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/pgvecto_rs/data:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: [ 'CMD', 'pg_isready' ]
|
||||
interval: 1s
|
||||
timeout: 3s
|
||||
retries: 30
|
||||
|
||||
chroma:
|
||||
image: ghcr.io/chroma-core/chroma:0.5.20
|
||||
container_name: chroma-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- chroma
|
||||
restart: always
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/chroma:/chroma/chroma
|
||||
environment:
|
||||
CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456}
|
||||
CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider}
|
||||
IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE}
|
||||
|
||||
oceanbase:
|
||||
image: quay.io/oceanbase/oceanbase-ce:4.3.3.0-100000142024101215
|
||||
container_name: oceanbase-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- oceanbase
|
||||
restart: always
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/oceanbase/data:/root/ob
|
||||
- ${DIFY_ROOT_PATH}/volumes/oceanbase/conf:/root/.obd/cluster
|
||||
- ${DIFY_ROOT_PATH}/volumes/oceanbase/init.d:/root/boot/init.d
|
||||
environment:
|
||||
OB_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G}
|
||||
OB_SYS_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
|
||||
OB_TENANT_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456}
|
||||
OB_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai}
|
||||
OB_SERVER_IP: '127.0.0.1'
|
||||
|
||||
oracle:
|
||||
image: container-registry.oracle.com/database/free:latest
|
||||
container_name: oracle-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- oracle
|
||||
restart: always
|
||||
volumes:
|
||||
- source: oradata
|
||||
type: volume
|
||||
target: /opt/oracle/oradata
|
||||
- ${DIFY_ROOT_PATH}/startupscripts:/opt/oracle/scripts/startup
|
||||
environment:
|
||||
ORACLE_PWD: ${ORACLE_PWD:-Dify123456}
|
||||
ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8}
|
||||
|
||||
etcd:
|
||||
image: quay.io/coreos/etcd:v3.5.5
|
||||
container_name: milvus-etcd-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- milvus
|
||||
environment:
|
||||
ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision}
|
||||
ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000}
|
||||
ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296}
|
||||
ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000}
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/milvus/etcd:/etcd
|
||||
command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd
|
||||
healthcheck:
|
||||
test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ]
|
||||
interval: 30s
|
||||
timeout: 20s
|
||||
retries: 3
|
||||
networks:
|
||||
- milvus
|
||||
|
||||
minio:
|
||||
image: minio/minio:RELEASE.2023-03-20T20-16-18Z
|
||||
container_name: milvus-minio-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- milvus
|
||||
environment:
|
||||
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
|
||||
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/milvus/minio:/minio_data
|
||||
command: minio server /minio_data --console-address ":9001"
|
||||
healthcheck:
|
||||
test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ]
|
||||
interval: 30s
|
||||
timeout: 20s
|
||||
retries: 3
|
||||
networks:
|
||||
- milvus
|
||||
|
||||
milvus-standalone:
|
||||
image: milvusdb/milvus:v2.5.0-beta
|
||||
container_name: milvus-standalone-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- milvus
|
||||
command: [ 'milvus', 'run', 'standalone' ]
|
||||
environment:
|
||||
ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379}
|
||||
MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000}
|
||||
common.security.authorizationEnabled: ${MILVUS_AUTHORIZATION_ENABLED:-true}
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/milvus/milvus:/var/lib/milvus
|
||||
healthcheck:
|
||||
test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ]
|
||||
interval: 30s
|
||||
start_period: 90s
|
||||
timeout: 20s
|
||||
retries: 3
|
||||
depends_on:
|
||||
- etcd
|
||||
- minio
|
||||
ports:
|
||||
- '19530:19530'
|
||||
- '9091:9091'
|
||||
networks:
|
||||
- milvus
|
||||
|
||||
opensearch:
|
||||
image: opensearchproject/opensearch:latest
|
||||
container_name: opensearch-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- opensearch
|
||||
environment:
|
||||
discovery.type: ${OPENSEARCH_DISCOVERY_TYPE:-single-node}
|
||||
bootstrap.memory_lock: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true}
|
||||
OPENSEARCH_JAVA_OPTS: -Xms${OPENSEARCH_JAVA_OPTS_MIN:-512m} -Xmx${OPENSEARCH_JAVA_OPTS_MAX:-1024m}
|
||||
OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123}
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: ${OPENSEARCH_MEMLOCK_SOFT:--1}
|
||||
hard: ${OPENSEARCH_MEMLOCK_HARD:--1}
|
||||
nofile:
|
||||
soft: ${OPENSEARCH_NOFILE_SOFT:-65536}
|
||||
hard: ${OPENSEARCH_NOFILE_HARD:-65536}
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/opensearch/data:/usr/share/opensearch/data
|
||||
networks:
|
||||
- opensearch-net
|
||||
|
||||
opensearch-dashboards:
|
||||
image: opensearchproject/opensearch-dashboards:latest
|
||||
container_name: opensearch-dashboards-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- opensearch
|
||||
environment:
|
||||
OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/opensearch/opensearch_dashboards.yml:/usr/share/opensearch-dashboards/config/opensearch_dashboards.yml
|
||||
networks:
|
||||
- opensearch-net
|
||||
depends_on:
|
||||
- opensearch
|
||||
|
||||
myscale:
|
||||
image: myscale/myscaledb:1.6.4
|
||||
container_name: myscale-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- myscale
|
||||
restart: always
|
||||
tty: true
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/myscale/data:/var/lib/clickhouse
|
||||
- ${DIFY_ROOT_PATH}/volumes/myscale/log:/var/log/clickhouse-server
|
||||
- ${DIFY_ROOT_PATH}/volumes/myscale/config/users.d/custom_users_config.xml:/etc/clickhouse-server/users.d/custom_users_config.xml
|
||||
ports:
|
||||
- '${MYSCALE_PORT:-8123}:${MYSCALE_PORT:-8123}'
|
||||
|
||||
elasticsearch:
|
||||
image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
|
||||
container_name: elasticsearch-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- elasticsearch
|
||||
- elasticsearch-ja
|
||||
restart: always
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/elasticsearch/docker-entrypoint.sh:/docker-entrypoint-mount.sh
|
||||
- dify_es01_data:/usr/share/elasticsearch/data
|
||||
environment:
|
||||
ELASTIC_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic}
|
||||
VECTOR_STORE: ${VECTOR_STORE:-}
|
||||
cluster.name: dify-es-cluster
|
||||
node.name: dify-es0
|
||||
discovery.type: single-node
|
||||
xpack.license.self_generated.type: basic
|
||||
xpack.security.enabled: 'true'
|
||||
xpack.security.enrollment.enabled: 'false'
|
||||
xpack.security.http.ssl.enabled: 'false'
|
||||
ports:
|
||||
- '${ELASTICSEARCH_PORT:-9200}:9200'
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 2g
|
||||
entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ]
|
||||
healthcheck:
|
||||
test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 50
|
||||
|
||||
kibana:
|
||||
image: docker.elastic.co/kibana/kibana:8.14.3
|
||||
container_name: kibana-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- elasticsearch
|
||||
depends_on:
|
||||
- elasticsearch
|
||||
restart: always
|
||||
environment:
|
||||
XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa
|
||||
NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana
|
||||
XPACK_SECURITY_ENABLED: 'true'
|
||||
XPACK_SECURITY_ENROLLMENT_ENABLED: 'false'
|
||||
XPACK_SECURITY_HTTP_SSL_ENABLED: 'false'
|
||||
XPACK_FLEET_ISAIRGAPPED: 'true'
|
||||
I18N_LOCALE: zh-CN
|
||||
SERVER_PORT: '5601'
|
||||
ELASTICSEARCH_HOSTS: http://elasticsearch:9200
|
||||
ports:
|
||||
- '${KIBANA_PORT:-5601}:5601'
|
||||
healthcheck:
|
||||
test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
|
||||
unstructured:
|
||||
image: downloads.unstructured.io/unstructured-io/unstructured-api:latest
|
||||
container_name: unstructured-${CONTAINER_NAME}
|
||||
profiles:
|
||||
- unstructured
|
||||
restart: always
|
||||
volumes:
|
||||
- ${DIFY_ROOT_PATH}/volumes/unstructured:/app/data
|
||||
|
||||
networks:
|
||||
ssrf_proxy_network:
|
||||
driver: bridge
|
||||
internal: true
|
||||
milvus:
|
||||
driver: bridge
|
||||
opensearch-net:
|
||||
driver: bridge
|
||||
internal: true
|
||||
|
||||
volumes:
|
||||
oradata:
|
||||
dify_es01_data:
|
||||
1
apps/dify/1.1.1/envs/default.env
Normal file
@@ -0,0 +1 @@
|
||||
ENV_FILE=.env
|
||||
965
apps/dify/1.1.1/envs/dify.env
Normal file
@@ -0,0 +1,965 @@
|
||||
# ------------------------------
|
||||
# Environment Variables for API service & worker
|
||||
# ------------------------------
|
||||
|
||||
# ------------------------------
|
||||
# Common Variables
|
||||
# ------------------------------
|
||||
|
||||
# The backend URL of the console API,
|
||||
# used to concatenate the authorization callback.
|
||||
# If empty, it is the same domain.
|
||||
# Example: https://api.console.dify.ai
|
||||
CONSOLE_API_URL=
|
||||
|
||||
# The front-end URL of the console web,
|
||||
# used to concatenate some front-end addresses and for CORS configuration use.
|
||||
# If empty, it is the same domain.
|
||||
# Example: https://console.dify.ai
|
||||
CONSOLE_WEB_URL=
|
||||
|
||||
# Service API Url,
|
||||
# used to display Service API Base Url to the front-end.
|
||||
# If empty, it is the same domain.
|
||||
# Example: https://api.dify.ai
|
||||
SERVICE_API_URL=
|
||||
|
||||
# WebApp API backend Url,
|
||||
# used to declare the back-end URL for the front-end API.
|
||||
# If empty, it is the same domain.
|
||||
# Example: https://api.app.dify.ai
|
||||
APP_API_URL=
|
||||
|
||||
# WebApp Url,
|
||||
# used to display WebAPP API Base Url to the front-end.
|
||||
# If empty, it is the same domain.
|
||||
# Example: https://app.dify.ai
|
||||
APP_WEB_URL=
|
||||
|
||||
# File preview or download Url prefix.
|
||||
# used to display File preview or download Url to the front-end or as Multi-model inputs;
|
||||
# Url is signed and has expiration time.
|
||||
FILES_URL=
|
||||
|
||||
# ------------------------------
|
||||
# Server Configuration
|
||||
# ------------------------------
|
||||
|
||||
# The log level for the application.
|
||||
# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`
|
||||
LOG_LEVEL=INFO
|
||||
# Log file path
|
||||
LOG_FILE=/app/logs/server.log
|
||||
# Log file max size, the unit is MB
|
||||
LOG_FILE_MAX_SIZE=20
|
||||
# Log file max backup count
|
||||
LOG_FILE_BACKUP_COUNT=5
|
||||
# Log dateformat
|
||||
LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S
|
||||
# Log Timezone
|
||||
LOG_TZ=UTC
|
||||
|
||||
# Debug mode, default is false.
|
||||
# It is recommended to turn on this configuration for local development
|
||||
# to prevent some problems caused by monkey patching.
|
||||
DEBUG=false
|
||||
|
||||
# Flask debug mode, it can output trace information at the interface when turned on,
|
||||
# which is convenient for debugging.
|
||||
FLASK_DEBUG=false
|
||||
|
||||
# A secretkey that is used for securely signing the session cookie
|
||||
# and encrypting sensitive information on the database.
|
||||
# You can generate a strong key using `openssl rand -base64 42`.
|
||||
SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
|
||||
|
||||
# Password for admin user initialization.
|
||||
# If left unset, admin user will not be prompted for a password
|
||||
# when creating the initial admin account.
|
||||
# The length of the password cannot exceed 30 characters.
|
||||
INIT_PASSWORD=
|
||||
|
||||
# Deployment environment.
|
||||
# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`.
|
||||
# Testing environment. There will be a distinct color label on the front-end page,
|
||||
# indicating that this environment is a testing environment.
|
||||
DEPLOY_ENV=PRODUCTION
|
||||
|
||||
# Whether to enable the version check policy.
|
||||
# If set to empty, https://updates.dify.ai will be called for version check.
|
||||
CHECK_UPDATE_URL=https://updates.dify.ai
|
||||
|
||||
# Used to change the OpenAI base address, default is https://api.openai.com/v1.
|
||||
# When OpenAI cannot be accessed in China, replace it with a domestic mirror address,
|
||||
# or when a local model provides OpenAI compatible API, it can be replaced.
|
||||
OPENAI_API_BASE=https://api.openai.com/v1
|
||||
|
||||
# When enabled, migrations will be executed prior to application startup
|
||||
# and the application will start after the migrations have completed.
|
||||
MIGRATION_ENABLED=true
|
||||
|
||||
# File Access Time specifies a time interval in seconds for the file to be accessed.
|
||||
# The default value is 300 seconds.
|
||||
FILES_ACCESS_TIMEOUT=300
|
||||
|
||||
# Access token expiration time in minutes
|
||||
ACCESS_TOKEN_EXPIRE_MINUTES=60
|
||||
|
||||
# Refresh token expiration time in days
|
||||
REFRESH_TOKEN_EXPIRE_DAYS=30
|
||||
|
||||
# The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer.
|
||||
APP_MAX_ACTIVE_REQUESTS=0
|
||||
APP_MAX_EXECUTION_TIME=1200
|
||||
|
||||
# ------------------------------
|
||||
# Container Startup Related Configuration
|
||||
# Only effective when starting with docker image or docker-compose.
|
||||
# ------------------------------
|
||||
|
||||
# API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed.
|
||||
DIFY_BIND_ADDRESS=0.0.0.0
|
||||
|
||||
# API service binding port number, default 5001.
|
||||
DIFY_PORT=5001
|
||||
|
||||
# The number of API server workers, i.e., the number of workers.
|
||||
# Formula: number of cpu cores x 2 + 1 for sync, 1 for Gevent
|
||||
# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers
|
||||
SERVER_WORKER_AMOUNT=1
|
||||
|
||||
# Defaults to gevent. If using windows, it can be switched to sync or solo.
|
||||
SERVER_WORKER_CLASS=gevent
|
||||
|
||||
# Default number of worker connections, the default is 10.
|
||||
SERVER_WORKER_CONNECTIONS=10
|
||||
|
||||
# Similar to SERVER_WORKER_CLASS.
|
||||
# If using windows, it can be switched to sync or solo.
|
||||
CELERY_WORKER_CLASS=
|
||||
|
||||
# Request handling timeout. The default is 200,
|
||||
# it is recommended to set it to 360 to support a longer sse connection time.
|
||||
GUNICORN_TIMEOUT=360
|
||||
|
||||
# The number of Celery workers. The default is 1, and can be set as needed.
|
||||
CELERY_WORKER_AMOUNT=
|
||||
|
||||
# Flag indicating whether to enable autoscaling of Celery workers.
|
||||
#
|
||||
# Autoscaling is useful when tasks are CPU intensive and can be dynamically
|
||||
# allocated and deallocated based on the workload.
|
||||
#
|
||||
# When autoscaling is enabled, the maximum and minimum number of workers can
|
||||
# be specified. The autoscaling algorithm will dynamically adjust the number
|
||||
# of workers within the specified range.
|
||||
#
|
||||
# Default is false (i.e., autoscaling is disabled).
|
||||
#
|
||||
# Example:
|
||||
# CELERY_AUTO_SCALE=true
|
||||
CELERY_AUTO_SCALE=false
|
||||
|
||||
# The maximum number of Celery workers that can be autoscaled.
|
||||
# This is optional and only used when autoscaling is enabled.
|
||||
# Default is not set.
|
||||
CELERY_MAX_WORKERS=
|
||||
|
||||
# The minimum number of Celery workers that can be autoscaled.
|
||||
# This is optional and only used when autoscaling is enabled.
|
||||
# Default is not set.
|
||||
CELERY_MIN_WORKERS=
|
||||
|
||||
# API Tool configuration
|
||||
API_TOOL_DEFAULT_CONNECT_TIMEOUT=10
|
||||
API_TOOL_DEFAULT_READ_TIMEOUT=60
|
||||
|
||||
|
||||
# ------------------------------
|
||||
# Database Configuration
|
||||
# The database uses PostgreSQL. Please use the public schema.
|
||||
# It is consistent with the configuration in the 'db' service below.
|
||||
# ------------------------------
|
||||
|
||||
DB_USERNAME=postgres
|
||||
DB_PASSWORD=difyai123456
|
||||
DB_HOST=db
|
||||
DB_PORT=5432
|
||||
DB_DATABASE=dify
|
||||
# The size of the database connection pool.
|
||||
# The default is 30 connections, which can be appropriately increased.
|
||||
SQLALCHEMY_POOL_SIZE=30
|
||||
# Database connection pool recycling time, the default is 3600 seconds.
|
||||
SQLALCHEMY_POOL_RECYCLE=3600
|
||||
# Whether to print SQL, default is false.
|
||||
SQLALCHEMY_ECHO=false
|
||||
|
||||
# Maximum number of connections to the database
|
||||
# Default is 100
|
||||
#
|
||||
# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS
|
||||
POSTGRES_MAX_CONNECTIONS=100
|
||||
|
||||
# Sets the amount of shared memory used for postgres's shared buffers.
|
||||
# Default is 128MB
|
||||
# Recommended value: 25% of available memory
|
||||
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS
|
||||
POSTGRES_SHARED_BUFFERS=128MB
|
||||
|
||||
# Sets the amount of memory used by each database worker for working space.
|
||||
# Default is 4MB
|
||||
#
|
||||
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM
|
||||
POSTGRES_WORK_MEM=4MB
|
||||
|
||||
# Sets the amount of memory reserved for maintenance activities.
|
||||
# Default is 64MB
|
||||
#
|
||||
# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM
|
||||
POSTGRES_MAINTENANCE_WORK_MEM=64MB
|
||||
|
||||
# Sets the planner's assumption about the effective cache size.
|
||||
# Default is 4096MB
|
||||
#
|
||||
# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE
|
||||
POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB
|
||||
|
||||
# ------------------------------
|
||||
# Redis Configuration
|
||||
# This Redis configuration is used for caching and for pub/sub during conversation.
|
||||
# ------------------------------
|
||||
|
||||
REDIS_HOST=redis
|
||||
REDIS_PORT=6379
|
||||
REDIS_USERNAME=
|
||||
REDIS_PASSWORD=difyai123456
|
||||
REDIS_USE_SSL=false
|
||||
REDIS_DB=0
|
||||
|
||||
# Whether to use Redis Sentinel mode.
|
||||
# If set to true, the application will automatically discover and connect to the master node through Sentinel.
|
||||
REDIS_USE_SENTINEL=false
|
||||
|
||||
# List of Redis Sentinel nodes. If Sentinel mode is enabled, provide at least one Sentinel IP and port.
|
||||
# Format: `<sentinel1_ip>:<sentinel1_port>,<sentinel2_ip>:<sentinel2_port>,<sentinel3_ip>:<sentinel3_port>`
|
||||
REDIS_SENTINELS=
|
||||
REDIS_SENTINEL_SERVICE_NAME=
|
||||
REDIS_SENTINEL_USERNAME=
|
||||
REDIS_SENTINEL_PASSWORD=
|
||||
REDIS_SENTINEL_SOCKET_TIMEOUT=0.1
|
||||
|
||||
# List of Redis Cluster nodes. If Cluster mode is enabled, provide at least one Cluster IP and port.
|
||||
# Format: `<Cluster1_ip>:<Cluster1_port>,<Cluster2_ip>:<Cluster2_port>,<Cluster3_ip>:<Cluster3_port>`
|
||||
REDIS_USE_CLUSTERS=false
|
||||
REDIS_CLUSTERS=
|
||||
REDIS_CLUSTERS_PASSWORD=
|
||||
|
||||
# ------------------------------
|
||||
# Celery Configuration
|
||||
# ------------------------------
|
||||
|
||||
# Use redis as the broker, and redis db 1 for celery broker.
|
||||
# Format as follows: `redis://<redis_username>:<redis_password>@<redis_host>:<redis_port>/<redis_database>`
|
||||
# Example: redis://:difyai123456@redis:6379/1
|
||||
# If use Redis Sentinel, format as follows: `sentinel://<sentinel_username>:<sentinel_password>@<sentinel_host>:<sentinel_port>/<redis_database>`
|
||||
# Example: sentinel://localhost:26379/1;sentinel://localhost:26380/1;sentinel://localhost:26381/1
|
||||
CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1
|
||||
BROKER_USE_SSL=false
|
||||
|
||||
# If you are using Redis Sentinel for high availability, configure the following settings.
|
||||
CELERY_USE_SENTINEL=false
|
||||
CELERY_SENTINEL_MASTER_NAME=
|
||||
CELERY_SENTINEL_SOCKET_TIMEOUT=0.1
|
||||
|
||||
# ------------------------------
|
||||
# CORS Configuration
|
||||
# Used to set the front-end cross-domain access policy.
|
||||
# ------------------------------
|
||||
|
||||
# Specifies the allowed origins for cross-origin requests to the Web API,
|
||||
# e.g. https://dify.app or * for all origins.
|
||||
WEB_API_CORS_ALLOW_ORIGINS=*
|
||||
|
||||
# Specifies the allowed origins for cross-origin requests to the console API,
|
||||
# e.g. https://cloud.dify.ai or * for all origins.
|
||||
CONSOLE_CORS_ALLOW_ORIGINS=*
|
||||
|
||||
# ------------------------------
|
||||
# File Storage Configuration
|
||||
# ------------------------------
|
||||
|
||||
# The type of storage to use for storing user files.
|
||||
STORAGE_TYPE=opendal
|
||||
|
||||
# Apache OpenDAL Configuration
|
||||
# The configuration for OpenDAL consists of the following format: OPENDAL_<SCHEME_NAME>_<CONFIG_NAME>.
|
||||
# You can find all the service configurations (CONFIG_NAME) in the repository at: https://github.com/apache/opendal/tree/main/core/src/services.
|
||||
# Dify will scan configurations starting with OPENDAL_<SCHEME_NAME> and automatically apply them.
|
||||
# The scheme name for the OpenDAL storage.
|
||||
OPENDAL_SCHEME=fs
|
||||
# Configurations for OpenDAL Local File System.
|
||||
OPENDAL_FS_ROOT=storage
|
||||
|
||||
# S3 Configuration
|
||||
#
|
||||
S3_ENDPOINT=
|
||||
S3_REGION=us-east-1
|
||||
S3_BUCKET_NAME=difyai
|
||||
S3_ACCESS_KEY=
|
||||
S3_SECRET_KEY=
|
||||
# Whether to use AWS managed IAM roles for authenticating with the S3 service.
|
||||
# If set to false, the access key and secret key must be provided.
|
||||
S3_USE_AWS_MANAGED_IAM=false
|
||||
|
||||
# Azure Blob Configuration
|
||||
#
|
||||
AZURE_BLOB_ACCOUNT_NAME=difyai
|
||||
AZURE_BLOB_ACCOUNT_KEY=difyai
|
||||
AZURE_BLOB_CONTAINER_NAME=difyai-container
|
||||
AZURE_BLOB_ACCOUNT_URL=https://<your_account_name>.blob.core.windows.net
|
||||
|
||||
# Google Storage Configuration
|
||||
#
|
||||
GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
|
||||
GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64=
|
||||
|
||||
# The Alibaba Cloud OSS configurations,
|
||||
#
|
||||
ALIYUN_OSS_BUCKET_NAME=your-bucket-name
|
||||
ALIYUN_OSS_ACCESS_KEY=your-access-key
|
||||
ALIYUN_OSS_SECRET_KEY=your-secret-key
|
||||
ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com
|
||||
ALIYUN_OSS_REGION=ap-southeast-1
|
||||
ALIYUN_OSS_AUTH_VERSION=v4
|
||||
# Don't start with '/'. OSS doesn't support leading slash in object names.
|
||||
ALIYUN_OSS_PATH=your-path
|
||||
|
||||
# Tencent COS Configuration
|
||||
#
|
||||
TENCENT_COS_BUCKET_NAME=your-bucket-name
|
||||
TENCENT_COS_SECRET_KEY=your-secret-key
|
||||
TENCENT_COS_SECRET_ID=your-secret-id
|
||||
TENCENT_COS_REGION=your-region
|
||||
TENCENT_COS_SCHEME=your-scheme
|
||||
|
||||
# Oracle Storage Configuration
|
||||
#
|
||||
OCI_ENDPOINT=https://objectstorage.us-ashburn-1.oraclecloud.com
|
||||
OCI_BUCKET_NAME=your-bucket-name
|
||||
OCI_ACCESS_KEY=your-access-key
|
||||
OCI_SECRET_KEY=your-secret-key
|
||||
OCI_REGION=us-ashburn-1
|
||||
|
||||
# Huawei OBS Configuration
|
||||
#
|
||||
HUAWEI_OBS_BUCKET_NAME=your-bucket-name
|
||||
HUAWEI_OBS_SECRET_KEY=your-secret-key
|
||||
HUAWEI_OBS_ACCESS_KEY=your-access-key
|
||||
HUAWEI_OBS_SERVER=your-server-url
|
||||
|
||||
# Volcengine TOS Configuration
|
||||
#
|
||||
VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name
|
||||
VOLCENGINE_TOS_SECRET_KEY=your-secret-key
|
||||
VOLCENGINE_TOS_ACCESS_KEY=your-access-key
|
||||
VOLCENGINE_TOS_ENDPOINT=your-server-url
|
||||
VOLCENGINE_TOS_REGION=your-region
|
||||
|
||||
# Baidu OBS Storage Configuration
|
||||
#
|
||||
BAIDU_OBS_BUCKET_NAME=your-bucket-name
|
||||
BAIDU_OBS_SECRET_KEY=your-secret-key
|
||||
BAIDU_OBS_ACCESS_KEY=your-access-key
|
||||
BAIDU_OBS_ENDPOINT=your-server-url
|
||||
|
||||
# Supabase Storage Configuration
|
||||
#
|
||||
SUPABASE_BUCKET_NAME=your-bucket-name
|
||||
SUPABASE_API_KEY=your-access-key
|
||||
SUPABASE_URL=your-server-url
|
||||
|
||||
# ------------------------------
|
||||
# Vector Database Configuration
|
||||
# ------------------------------
|
||||
|
||||
# The type of vector store to use.
|
||||
# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`.
|
||||
VECTOR_STORE=weaviate
|
||||
|
||||
# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
|
||||
WEAVIATE_ENDPOINT=http://weaviate:8080
|
||||
WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
|
||||
|
||||
# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`.
|
||||
QDRANT_URL=http://qdrant:6333
|
||||
QDRANT_API_KEY=difyai123456
|
||||
QDRANT_CLIENT_TIMEOUT=20
|
||||
QDRANT_GRPC_ENABLED=false
|
||||
QDRANT_GRPC_PORT=6334
|
||||
|
||||
# Milvus configuration Only available when VECTOR_STORE is `milvus`.
|
||||
# The milvus uri.
|
||||
MILVUS_URI=http://127.0.0.1:19530
|
||||
MILVUS_TOKEN=
|
||||
MILVUS_USER=root
|
||||
MILVUS_PASSWORD=Milvus
|
||||
MILVUS_ENABLE_HYBRID_SEARCH=False
|
||||
|
||||
# MyScale configuration, only available when VECTOR_STORE is `myscale`
|
||||
# For multi-language support, please set MYSCALE_FTS_PARAMS with referring to:
|
||||
# https://myscale.com/docs/en/text-search/#understanding-fts-index-parameters
|
||||
MYSCALE_HOST=myscale
|
||||
MYSCALE_PORT=8123
|
||||
MYSCALE_USER=default
|
||||
MYSCALE_PASSWORD=
|
||||
MYSCALE_DATABASE=dify
|
||||
MYSCALE_FTS_PARAMS=
|
||||
|
||||
# Couchbase configurations, only available when VECTOR_STORE is `couchbase`
|
||||
# The connection string must include hostname defined in the docker-compose file (couchbase-server in this case)
|
||||
COUCHBASE_CONNECTION_STRING=couchbase://couchbase-server
|
||||
COUCHBASE_USER=Administrator
|
||||
COUCHBASE_PASSWORD=password
|
||||
COUCHBASE_BUCKET_NAME=Embeddings
|
||||
COUCHBASE_SCOPE_NAME=_default
|
||||
|
||||
# pgvector configurations, only available when VECTOR_STORE is `pgvector`
|
||||
PGVECTOR_HOST=pgvector
|
||||
PGVECTOR_PORT=5432
|
||||
PGVECTOR_USER=postgres
|
||||
PGVECTOR_PASSWORD=difyai123456
|
||||
PGVECTOR_DATABASE=dify
|
||||
PGVECTOR_MIN_CONNECTION=1
|
||||
PGVECTOR_MAX_CONNECTION=5
|
||||
|
||||
# pgvecto-rs configurations, only available when VECTOR_STORE is `pgvecto-rs`
|
||||
PGVECTO_RS_HOST=pgvecto-rs
|
||||
PGVECTO_RS_PORT=5432
|
||||
PGVECTO_RS_USER=postgres
|
||||
PGVECTO_RS_PASSWORD=difyai123456
|
||||
PGVECTO_RS_DATABASE=dify
|
||||
|
||||
# analyticdb configurations, only available when VECTOR_STORE is `analyticdb`
|
||||
ANALYTICDB_KEY_ID=your-ak
|
||||
ANALYTICDB_KEY_SECRET=your-sk
|
||||
ANALYTICDB_REGION_ID=cn-hangzhou
|
||||
ANALYTICDB_INSTANCE_ID=gp-ab123456
|
||||
ANALYTICDB_ACCOUNT=testaccount
|
||||
ANALYTICDB_PASSWORD=testpassword
|
||||
ANALYTICDB_NAMESPACE=dify
|
||||
ANALYTICDB_NAMESPACE_PASSWORD=difypassword
|
||||
ANALYTICDB_HOST=gp-test.aliyuncs.com
|
||||
ANALYTICDB_PORT=5432
|
||||
ANALYTICDB_MIN_CONNECTION=1
|
||||
ANALYTICDB_MAX_CONNECTION=5
|
||||
|
||||
# TiDB vector configurations, only available when VECTOR_STORE is `tidb`
|
||||
TIDB_VECTOR_HOST=tidb
|
||||
TIDB_VECTOR_PORT=4000
|
||||
TIDB_VECTOR_USER=
|
||||
TIDB_VECTOR_PASSWORD=
|
||||
TIDB_VECTOR_DATABASE=dify
|
||||
|
||||
# Tidb on qdrant configuration, only available when VECTOR_STORE is `tidb_on_qdrant`
|
||||
TIDB_ON_QDRANT_URL=http://127.0.0.1
|
||||
TIDB_ON_QDRANT_API_KEY=dify
|
||||
TIDB_ON_QDRANT_CLIENT_TIMEOUT=20
|
||||
TIDB_ON_QDRANT_GRPC_ENABLED=false
|
||||
TIDB_ON_QDRANT_GRPC_PORT=6334
|
||||
TIDB_PUBLIC_KEY=dify
|
||||
TIDB_PRIVATE_KEY=dify
|
||||
TIDB_API_URL=http://127.0.0.1
|
||||
TIDB_IAM_API_URL=http://127.0.0.1
|
||||
TIDB_REGION=regions/aws-us-east-1
|
||||
TIDB_PROJECT_ID=dify
|
||||
TIDB_SPEND_LIMIT=100
|
||||
|
||||
# Chroma configuration, only available when VECTOR_STORE is `chroma`
|
||||
CHROMA_HOST=127.0.0.1
|
||||
CHROMA_PORT=8000
|
||||
CHROMA_TENANT=default_tenant
|
||||
CHROMA_DATABASE=default_database
|
||||
CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider
|
||||
CHROMA_AUTH_CREDENTIALS=
|
||||
|
||||
# Oracle configuration, only available when VECTOR_STORE is `oracle`
|
||||
ORACLE_HOST=oracle
|
||||
ORACLE_PORT=1521
|
||||
ORACLE_USER=dify
|
||||
ORACLE_PASSWORD=dify
|
||||
ORACLE_DATABASE=FREEPDB1
|
||||
|
||||
# relyt configurations, only available when VECTOR_STORE is `relyt`
|
||||
RELYT_HOST=db
|
||||
RELYT_PORT=5432
|
||||
RELYT_USER=postgres
|
||||
RELYT_PASSWORD=difyai123456
|
||||
RELYT_DATABASE=postgres
|
||||
|
||||
# open search configuration, only available when VECTOR_STORE is `opensearch`
|
||||
OPENSEARCH_HOST=opensearch
|
||||
OPENSEARCH_PORT=9200
|
||||
OPENSEARCH_USER=admin
|
||||
OPENSEARCH_PASSWORD=admin
|
||||
OPENSEARCH_SECURE=true
|
||||
|
||||
# tencent vector configurations, only available when VECTOR_STORE is `tencent`
|
||||
TENCENT_VECTOR_DB_URL=http://127.0.0.1
|
||||
TENCENT_VECTOR_DB_API_KEY=dify
|
||||
TENCENT_VECTOR_DB_TIMEOUT=30
|
||||
TENCENT_VECTOR_DB_USERNAME=dify
|
||||
TENCENT_VECTOR_DB_DATABASE=dify
|
||||
TENCENT_VECTOR_DB_SHARD=1
|
||||
TENCENT_VECTOR_DB_REPLICAS=2
|
||||
|
||||
# ElasticSearch configuration, only available when VECTOR_STORE is `elasticsearch`
|
||||
ELASTICSEARCH_HOST=0.0.0.0
|
||||
ELASTICSEARCH_PORT=9200
|
||||
ELASTICSEARCH_USERNAME=elastic
|
||||
ELASTICSEARCH_PASSWORD=elastic
|
||||
KIBANA_PORT=5601
|
||||
|
||||
# baidu vector configurations, only available when VECTOR_STORE is `baidu`
|
||||
BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287
|
||||
BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000
|
||||
BAIDU_VECTOR_DB_ACCOUNT=root
|
||||
BAIDU_VECTOR_DB_API_KEY=dify
|
||||
BAIDU_VECTOR_DB_DATABASE=dify
|
||||
BAIDU_VECTOR_DB_SHARD=1
|
||||
BAIDU_VECTOR_DB_REPLICAS=3
|
||||
|
||||
# VikingDB configurations, only available when VECTOR_STORE is `vikingdb`
|
||||
VIKINGDB_ACCESS_KEY=your-ak
|
||||
VIKINGDB_SECRET_KEY=your-sk
|
||||
VIKINGDB_REGION=cn-shanghai
|
||||
VIKINGDB_HOST=api-vikingdb.xxx.volces.com
|
||||
VIKINGDB_SCHEMA=http
|
||||
VIKINGDB_CONNECTION_TIMEOUT=30
|
||||
VIKINGDB_SOCKET_TIMEOUT=30
|
||||
|
||||
# Lindorm configuration, only available when VECTOR_STORE is `lindorm`
|
||||
LINDORM_URL=http://lindorm:30070
|
||||
LINDORM_USERNAME=lindorm
|
||||
LINDORM_PASSWORD=lindorm
|
||||
|
||||
# OceanBase Vector configuration, only available when VECTOR_STORE is `oceanbase`
|
||||
OCEANBASE_VECTOR_HOST=oceanbase
|
||||
OCEANBASE_VECTOR_PORT=2881
|
||||
OCEANBASE_VECTOR_USER=root@test
|
||||
OCEANBASE_VECTOR_PASSWORD=difyai123456
|
||||
OCEANBASE_VECTOR_DATABASE=test
|
||||
OCEANBASE_CLUSTER_NAME=difyai
|
||||
OCEANBASE_MEMORY_LIMIT=6G
|
||||
|
||||
# Upstash Vector configuration, only available when VECTOR_STORE is `upstash`
|
||||
UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io
|
||||
UPSTASH_VECTOR_TOKEN=dify
|
||||
|
||||
# ------------------------------
|
||||
# Knowledge Configuration
|
||||
# ------------------------------
|
||||
|
||||
# Upload file size limit, default 15M.
|
||||
UPLOAD_FILE_SIZE_LIMIT=15
|
||||
|
||||
# The maximum number of files that can be uploaded at a time, default 5.
|
||||
UPLOAD_FILE_BATCH_LIMIT=5
|
||||
|
||||
# ETL type, support: `dify`, `Unstructured`
|
||||
# `dify` Dify's proprietary file extraction scheme
|
||||
# `Unstructured` Unstructured.io file extraction scheme
|
||||
ETL_TYPE=dify
|
||||
|
||||
# Unstructured API path and API key, needs to be configured when ETL_TYPE is Unstructured
|
||||
# Or using Unstructured for document extractor node for pptx.
|
||||
# For example: http://unstructured:8000/general/v0/general
|
||||
UNSTRUCTURED_API_URL=
|
||||
UNSTRUCTURED_API_KEY=
|
||||
SCARF_NO_ANALYTICS=true
|
||||
|
||||
# ------------------------------
|
||||
# Model Configuration
|
||||
# ------------------------------
|
||||
|
||||
# The maximum number of tokens allowed for prompt generation.
|
||||
# This setting controls the upper limit of tokens that can be used by the LLM
|
||||
# when generating a prompt in the prompt generation tool.
|
||||
# Default: 512 tokens.
|
||||
PROMPT_GENERATION_MAX_TOKENS=512
|
||||
|
||||
# The maximum number of tokens allowed for code generation.
|
||||
# This setting controls the upper limit of tokens that can be used by the LLM
|
||||
# when generating code in the code generation tool.
|
||||
# Default: 1024 tokens.
|
||||
CODE_GENERATION_MAX_TOKENS=1024
|
||||
|
||||
# ------------------------------
|
||||
# Multi-modal Configuration
|
||||
# ------------------------------
|
||||
|
||||
# The format of the image/video/audio/document sent when the multi-modal model is input,
|
||||
# the default is base64, optional url.
|
||||
# The delay of the call in url mode will be lower than that in base64 mode.
|
||||
# It is generally recommended to use the more compatible base64 mode.
|
||||
# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document.
|
||||
MULTIMODAL_SEND_FORMAT=base64
|
||||
# Upload image file size limit, default 10M.
|
||||
UPLOAD_IMAGE_FILE_SIZE_LIMIT=10
|
||||
# Upload video file size limit, default 100M.
|
||||
UPLOAD_VIDEO_FILE_SIZE_LIMIT=100
|
||||
# Upload audio file size limit, default 50M.
|
||||
UPLOAD_AUDIO_FILE_SIZE_LIMIT=50
|
||||
|
||||
# ------------------------------
|
||||
# Sentry Configuration
|
||||
# Used for application monitoring and error log tracking.
|
||||
# ------------------------------
|
||||
SENTRY_DSN=
|
||||
|
||||
# API Service Sentry DSN address, default is empty, when empty,
|
||||
# all monitoring information is not reported to Sentry.
|
||||
# If not set, Sentry error reporting will be disabled.
|
||||
API_SENTRY_DSN=
|
||||
# API Service The reporting ratio of Sentry events, if it is 0.01, it is 1%.
|
||||
API_SENTRY_TRACES_SAMPLE_RATE=1.0
|
||||
# API Service The reporting ratio of Sentry profiles, if it is 0.01, it is 1%.
|
||||
API_SENTRY_PROFILES_SAMPLE_RATE=1.0
|
||||
|
||||
# Web Service Sentry DSN address, default is empty, when empty,
|
||||
# all monitoring information is not reported to Sentry.
|
||||
# If not set, Sentry error reporting will be disabled.
|
||||
WEB_SENTRY_DSN=
|
||||
|
||||
# ------------------------------
|
||||
# Notion Integration Configuration
|
||||
# Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations
|
||||
# ------------------------------
|
||||
|
||||
# Configure as "public" or "internal".
|
||||
# Since Notion's OAuth redirect URL only supports HTTPS,
|
||||
# if deploying locally, please use Notion's internal integration.
|
||||
NOTION_INTEGRATION_TYPE=public
|
||||
# Notion OAuth client secret (used for public integration type)
|
||||
NOTION_CLIENT_SECRET=
|
||||
# Notion OAuth client id (used for public integration type)
|
||||
NOTION_CLIENT_ID=
|
||||
# Notion internal integration secret.
|
||||
# If the value of NOTION_INTEGRATION_TYPE is "internal",
|
||||
# you need to configure this variable.
|
||||
NOTION_INTERNAL_SECRET=
|
||||
|
||||
# ------------------------------
|
||||
# Mail related configuration
|
||||
# ------------------------------
|
||||
|
||||
# Mail type, support: resend, smtp
|
||||
MAIL_TYPE=resend
|
||||
|
||||
# Default send from email address, if not specified
|
||||
MAIL_DEFAULT_SEND_FROM=
|
||||
|
||||
# API-Key for the Resend email provider, used when MAIL_TYPE is `resend`.
|
||||
RESEND_API_URL=https://api.resend.com
|
||||
RESEND_API_KEY=your-resend-api-key
|
||||
|
||||
|
||||
# SMTP server configuration, used when MAIL_TYPE is `smtp`
|
||||
SMTP_SERVER=
|
||||
SMTP_PORT=465
|
||||
SMTP_USERNAME=
|
||||
SMTP_PASSWORD=
|
||||
SMTP_USE_TLS=true
|
||||
SMTP_OPPORTUNISTIC_TLS=false
|
||||
|
||||
# ------------------------------
|
||||
# Others Configuration
|
||||
# ------------------------------
|
||||
|
||||
# Maximum length of segmentation tokens for indexing
|
||||
INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000
|
||||
|
||||
# Member invitation link valid time (hours),
|
||||
# Default: 72.
|
||||
INVITE_EXPIRY_HOURS=72
|
||||
|
||||
# Reset password token valid time (minutes),
|
||||
RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5
|
||||
|
||||
# The sandbox service endpoint.
|
||||
CODE_EXECUTION_ENDPOINT=http://sandbox:8194
|
||||
CODE_EXECUTION_API_KEY=dify-sandbox
|
||||
CODE_MAX_NUMBER=9223372036854775807
|
||||
CODE_MIN_NUMBER=-9223372036854775808
|
||||
CODE_MAX_DEPTH=5
|
||||
CODE_MAX_PRECISION=20
|
||||
CODE_MAX_STRING_LENGTH=80000
|
||||
CODE_MAX_STRING_ARRAY_LENGTH=30
|
||||
CODE_MAX_OBJECT_ARRAY_LENGTH=30
|
||||
CODE_MAX_NUMBER_ARRAY_LENGTH=1000
|
||||
CODE_EXECUTION_CONNECT_TIMEOUT=10
|
||||
CODE_EXECUTION_READ_TIMEOUT=60
|
||||
CODE_EXECUTION_WRITE_TIMEOUT=10
|
||||
TEMPLATE_TRANSFORM_MAX_LENGTH=80000
|
||||
|
||||
# Workflow runtime configuration
|
||||
WORKFLOW_MAX_EXECUTION_STEPS=500
|
||||
WORKFLOW_MAX_EXECUTION_TIME=1200
|
||||
WORKFLOW_CALL_MAX_DEPTH=5
|
||||
MAX_VARIABLE_SIZE=204800
|
||||
WORKFLOW_PARALLEL_DEPTH_LIMIT=3
|
||||
WORKFLOW_FILE_UPLOAD_LIMIT=10
|
||||
|
||||
# HTTP request node in workflow configuration
|
||||
HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760
|
||||
HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576
|
||||
|
||||
# SSRF Proxy server HTTP URL
|
||||
SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128
|
||||
# SSRF Proxy server HTTPS URL
|
||||
SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for web Service
|
||||
# ------------------------------
|
||||
|
||||
# The timeout for the text generation in millisecond
|
||||
TEXT_GENERATION_TIMEOUT_MS=60000
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for db Service
|
||||
# ------------------------------
|
||||
|
||||
PGUSER=${DB_USERNAME}
|
||||
# The password for the default postgres user.
|
||||
POSTGRES_PASSWORD=${DB_PASSWORD}
|
||||
# The name of the default postgres database.
|
||||
POSTGRES_DB=${DB_DATABASE}
|
||||
# postgres data directory
|
||||
PGDATA=/var/lib/postgresql/data/pgdata
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for sandbox Service
|
||||
# ------------------------------
|
||||
|
||||
# The API key for the sandbox service
|
||||
SANDBOX_API_KEY=dify-sandbox
|
||||
# The mode in which the Gin framework runs
|
||||
SANDBOX_GIN_MODE=release
|
||||
# The timeout for the worker in seconds
|
||||
SANDBOX_WORKER_TIMEOUT=15
|
||||
# Enable network for the sandbox service
|
||||
SANDBOX_ENABLE_NETWORK=true
|
||||
# HTTP proxy URL for SSRF protection
|
||||
SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128
|
||||
# HTTPS proxy URL for SSRF protection
|
||||
SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128
|
||||
# The port on which the sandbox service runs
|
||||
SANDBOX_PORT=8194
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for weaviate Service
|
||||
# (only used when VECTOR_STORE is weaviate)
|
||||
# ------------------------------
|
||||
WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate
|
||||
WEAVIATE_QUERY_DEFAULTS_LIMIT=25
|
||||
WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
|
||||
WEAVIATE_DEFAULT_VECTORIZER_MODULE=none
|
||||
WEAVIATE_CLUSTER_HOSTNAME=node1
|
||||
WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true
|
||||
WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
|
||||
WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai
|
||||
WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true
|
||||
WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for Chroma
|
||||
# (only used when VECTOR_STORE is chroma)
|
||||
# ------------------------------
|
||||
|
||||
# Authentication credentials for Chroma server
|
||||
CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456
|
||||
# Authentication provider for Chroma server
|
||||
CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider
|
||||
# Persistence setting for Chroma server
|
||||
CHROMA_IS_PERSISTENT=TRUE
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for Oracle Service
|
||||
# (only used when VECTOR_STORE is Oracle)
|
||||
# ------------------------------
|
||||
ORACLE_PWD=Dify123456
|
||||
ORACLE_CHARACTERSET=AL32UTF8
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for milvus Service
|
||||
# (only used when VECTOR_STORE is milvus)
|
||||
# ------------------------------
|
||||
# ETCD configuration for auto compaction mode
|
||||
ETCD_AUTO_COMPACTION_MODE=revision
|
||||
# ETCD configuration for auto compaction retention in terms of number of revisions
|
||||
ETCD_AUTO_COMPACTION_RETENTION=1000
|
||||
# ETCD configuration for backend quota in bytes
|
||||
ETCD_QUOTA_BACKEND_BYTES=4294967296
|
||||
# ETCD configuration for the number of changes before triggering a snapshot
|
||||
ETCD_SNAPSHOT_COUNT=50000
|
||||
# MinIO access key for authentication
|
||||
MINIO_ACCESS_KEY=minioadmin
|
||||
# MinIO secret key for authentication
|
||||
MINIO_SECRET_KEY=minioadmin
|
||||
# ETCD service endpoints
|
||||
ETCD_ENDPOINTS=etcd:2379
|
||||
# MinIO service address
|
||||
MINIO_ADDRESS=minio:9000
|
||||
# Enable or disable security authorization
|
||||
MILVUS_AUTHORIZATION_ENABLED=true
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for pgvector / pgvector-rs Service
|
||||
# (only used when VECTOR_STORE is pgvector / pgvector-rs)
|
||||
# ------------------------------
|
||||
PGVECTOR_PGUSER=postgres
|
||||
# The password for the default postgres user.
|
||||
PGVECTOR_POSTGRES_PASSWORD=difyai123456
|
||||
# The name of the default postgres database.
|
||||
PGVECTOR_POSTGRES_DB=dify
|
||||
# postgres data directory
|
||||
PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for opensearch
|
||||
# (only used when VECTOR_STORE is opensearch)
|
||||
# ------------------------------
|
||||
OPENSEARCH_DISCOVERY_TYPE=single-node
|
||||
OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true
|
||||
OPENSEARCH_JAVA_OPTS_MIN=512m
|
||||
OPENSEARCH_JAVA_OPTS_MAX=1024m
|
||||
OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123
|
||||
OPENSEARCH_MEMLOCK_SOFT=-1
|
||||
OPENSEARCH_MEMLOCK_HARD=-1
|
||||
OPENSEARCH_NOFILE_SOFT=65536
|
||||
OPENSEARCH_NOFILE_HARD=65536
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for Nginx reverse proxy
|
||||
# ------------------------------
|
||||
NGINX_SERVER_NAME=_
|
||||
NGINX_HTTPS_ENABLED=false
|
||||
# HTTP port
|
||||
NGINX_PORT=80
|
||||
# SSL settings are only applied when HTTPS_ENABLED is true
|
||||
NGINX_SSL_PORT=443
|
||||
# if HTTPS_ENABLED is true, you're required to add your own SSL certificates/keys to the `./nginx/ssl` directory
|
||||
# and modify the env vars below accordingly.
|
||||
NGINX_SSL_CERT_FILENAME=dify.crt
|
||||
NGINX_SSL_CERT_KEY_FILENAME=dify.key
|
||||
NGINX_SSL_PROTOCOLS=TLSv1.1 TLSv1.2 TLSv1.3
|
||||
|
||||
# Nginx performance tuning
|
||||
NGINX_WORKER_PROCESSES=auto
|
||||
NGINX_CLIENT_MAX_BODY_SIZE=15M
|
||||
NGINX_KEEPALIVE_TIMEOUT=65
|
||||
|
||||
# Proxy settings
|
||||
NGINX_PROXY_READ_TIMEOUT=3600s
|
||||
NGINX_PROXY_SEND_TIMEOUT=3600s
|
||||
|
||||
# Set true to accept requests for /.well-known/acme-challenge/
|
||||
NGINX_ENABLE_CERTBOT_CHALLENGE=false
|
||||
|
||||
# ------------------------------
|
||||
# Certbot Configuration
|
||||
# ------------------------------
|
||||
|
||||
# Email address (required to get certificates from Let's Encrypt)
|
||||
CERTBOT_EMAIL=your_email@example.com
|
||||
|
||||
# Domain name
|
||||
CERTBOT_DOMAIN=your_domain.com
|
||||
|
||||
# certbot command options
|
||||
# i.e: --force-renewal --dry-run --test-cert --debug
|
||||
CERTBOT_OPTIONS=
|
||||
|
||||
# ------------------------------
|
||||
# Environment Variables for SSRF Proxy
|
||||
# ------------------------------
|
||||
SSRF_HTTP_PORT=3128
|
||||
SSRF_COREDUMP_DIR=/var/spool/squid
|
||||
SSRF_REVERSE_PROXY_PORT=8194
|
||||
SSRF_SANDBOX_HOST=sandbox
|
||||
SSRF_DEFAULT_TIME_OUT=5
|
||||
SSRF_DEFAULT_CONNECT_TIME_OUT=5
|
||||
SSRF_DEFAULT_READ_TIME_OUT=5
|
||||
SSRF_DEFAULT_WRITE_TIME_OUT=5
|
||||
|
||||
# ------------------------------
|
||||
# docker env var for specifying vector db type at startup
|
||||
# (based on the vector db type, the corresponding docker
|
||||
# compose profile will be used)
|
||||
# if you want to use unstructured, add ',unstructured' to the end
|
||||
# ------------------------------
|
||||
COMPOSE_PROFILES=${VECTOR_STORE:-weaviate}
|
||||
|
||||
# ------------------------------
|
||||
# Docker Compose Service Expose Host Port Configurations
|
||||
# ------------------------------
|
||||
EXPOSE_NGINX_PORT=80
|
||||
EXPOSE_NGINX_SSL_PORT=443
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# ModelProvider & Tool Position Configuration
|
||||
# Used to specify the model providers and tools that can be used in the app.
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
# Pin, include, and exclude tools
|
||||
# Use comma-separated values with no spaces between items.
|
||||
# Example: POSITION_TOOL_PINS=bing,google
|
||||
POSITION_TOOL_PINS=
|
||||
POSITION_TOOL_INCLUDES=
|
||||
POSITION_TOOL_EXCLUDES=
|
||||
|
||||
# Pin, include, and exclude model providers
|
||||
# Use comma-separated values with no spaces between items.
|
||||
# Example: POSITION_PROVIDER_PINS=openai,openllm
|
||||
POSITION_PROVIDER_PINS=
|
||||
POSITION_PROVIDER_INCLUDES=
|
||||
POSITION_PROVIDER_EXCLUDES=
|
||||
|
||||
# CSP https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP
|
||||
CSP_WHITELIST=
|
||||
|
||||
# Enable or disable create tidb service job
|
||||
CREATE_TIDB_SERVICE_JOB_ENABLED=false
|
||||
|
||||
# Maximum number of submitted thread count in a ThreadPool for parallel node execution
|
||||
MAX_SUBMIT_COUNT=100
|
||||
|
||||
# The maximum number of top-k value for RAG.
|
||||
TOP_K_MAX_VALUE=10
|
||||
|
||||
# ------------------------------
|
||||
# Plugin Daemon Configuration
|
||||
# ------------------------------
|
||||
|
||||
DB_PLUGIN_DATABASE=dify_plugin
|
||||
EXPOSE_PLUGIN_DAEMON_PORT=5002
|
||||
PLUGIN_DAEMON_PORT=5002
|
||||
PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi
|
||||
PLUGIN_DAEMON_URL=http://plugin_daemon:5002
|
||||
PLUGIN_MAX_PACKAGE_SIZE=52428800
|
||||
PLUGIN_PPROF_ENABLED=false
|
||||
|
||||
PLUGIN_DEBUGGING_HOST=0.0.0.0
|
||||
PLUGIN_DEBUGGING_PORT=5003
|
||||
EXPOSE_PLUGIN_DEBUGGING_HOST=localhost
|
||||
EXPOSE_PLUGIN_DEBUGGING_PORT=5003
|
||||
|
||||
PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1
|
||||
PLUGIN_DIFY_INNER_API_URL=http://api:5001
|
||||
|
||||
ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id}
|
||||
|
||||
MARKETPLACE_ENABLED=true
|
||||
MARKETPLACE_API_URL=https://marketplace.dify.ai
|
||||
|
||||
FORCE_VERIFYING_SIGNATURE=true
|
||||
1
apps/dify/1.1.1/envs/global.env
Normal file
@@ -0,0 +1 @@
|
||||
TZ=Asia/Shanghai
|
||||
36
apps/dify/1.1.1/scripts/init.sh
Normal file
@@ -0,0 +1,36 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ -f .env ]; then
|
||||
source .env
|
||||
|
||||
# setup-1 add default values
|
||||
CURRENT_DIR=$(pwd)
|
||||
sed -i '/^ENV_FILE=/d' .env
|
||||
sed -i '/^GLOBAL_ENV_FILE=/d' .env
|
||||
echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env
|
||||
echo "GLOBAL_ENV_FILE=${CURRENT_DIR}/envs/global.env" >> .env
|
||||
echo "APP_ENV_FILE=${CURRENT_DIR}/envs/dify.env" >> .env
|
||||
|
||||
# setup-2 update dir permissions
|
||||
mkdir -p "$DIFY_ROOT_PATH"
|
||||
|
||||
cp -r conf/. "$DIFY_ROOT_PATH/"
|
||||
|
||||
# setup-3 sync environment variables
|
||||
env_source="envs/dify.env"
|
||||
if [ -f "$env_source" ]; then
|
||||
while IFS='=' read -r key value; do
|
||||
if [[ -z "$key" || "$key" =~ ^# ]]; then
|
||||
continue
|
||||
fi
|
||||
if ! grep -q "^$key=" .env; then
|
||||
echo "$key=$value" >> .env
|
||||
fi
|
||||
done < "$env_source"
|
||||
fi
|
||||
|
||||
echo "Check Finish."
|
||||
|
||||
else
|
||||
echo "Error: .env file not found."
|
||||
fi
|
||||
10
apps/dify/1.1.1/scripts/uninstall.sh
Normal file
@@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ -f .env ]; then
|
||||
source .env
|
||||
|
||||
echo "Check Finish."
|
||||
|
||||
else
|
||||
echo "Error: .env file not found."
|
||||
fi
|
||||
47
apps/dify/1.1.1/scripts/upgrade.sh
Normal file
@@ -0,0 +1,47 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ -f .env ]; then
|
||||
source .env
|
||||
|
||||
# setup-1 add default values
|
||||
CURRENT_DIR=$(pwd)
|
||||
sed -i '/^ENV_FILE=/d' .env
|
||||
sed -i '/^GLOBAL_ENV_FILE=/d' .env
|
||||
echo "ENV_FILE=${CURRENT_DIR}/.env" >> .env
|
||||
echo "GLOBAL_ENV_FILE=${CURRENT_DIR}/envs/global.env" >> .env
|
||||
echo "APP_ENV_FILE=${CURRENT_DIR}/envs/dify.env" >> .env
|
||||
|
||||
# setup-2 update dir permissions
|
||||
mkdir -p "$DIFY_ROOT_PATH"
|
||||
|
||||
if [ -d "conf" ]; then
|
||||
find conf -type f | while read -r file; do
|
||||
dest="$DIFY_ROOT_PATH/${file#conf/}"
|
||||
if [ ! -e "$dest" ]; then
|
||||
mkdir -p "$(dirname "$dest")"
|
||||
cp "$file" "$dest"
|
||||
fi
|
||||
done
|
||||
echo "Conf files copied to $DIFY_ROOT_PATH."
|
||||
else
|
||||
echo "Warning: conf directory not found."
|
||||
fi
|
||||
|
||||
# setup-3 sync environment variables
|
||||
env_source="envs/dify.env"
|
||||
if [ -f "$env_source" ]; then
|
||||
while IFS='=' read -r key value; do
|
||||
if [[ -z "$key" || "$key" =~ ^# ]]; then
|
||||
continue
|
||||
fi
|
||||
if ! grep -q "^$key=" .env; then
|
||||
echo "$key=$value" >> .env
|
||||
fi
|
||||
done < "$env_source"
|
||||
fi
|
||||
|
||||
echo "Check Finish."
|
||||
|
||||
else
|
||||
echo "Error: .env file not found."
|
||||
fi
|
||||
95
apps/dify/README.md
Normal file
@@ -0,0 +1,95 @@
|
||||
Dify 是一个开源的 LLM 应用开发平台。其直观的界面结合了 AI 工作流、RAG 管道、Agent、模型管理、可观测性功能等,让您可以快速从原型到生产。以下是其核心功能列表:
|
||||
</br> </br>
|
||||
|
||||

|
||||
|
||||
**1. 工作流**:
|
||||
在画布上构建和测试功能强大的 AI 工作流程,利用以下所有功能以及更多功能。
|
||||
|
||||
**2. 全面的模型支持**:
|
||||
与数百种专有/开源 LLMs 以及数十种推理提供商和自托管解决方案无缝集成,涵盖 GPT、Mistral、Llama3 以及任何与 OpenAI API 兼容的模型。完整的支持模型提供商列表可在[此处](https://docs.dify.ai/getting-started/readme/model-providers)找到。
|
||||
|
||||

|
||||
|
||||
**3. Prompt IDE**:
|
||||
用于制作提示、比较模型性能以及向基于聊天的应用程序添加其他功能(如文本转语音)的直观界面。
|
||||
|
||||
**4. RAG Pipeline**:
|
||||
广泛的 RAG 功能,涵盖从文档摄入到检索的所有内容,支持从 PDF、PPT 和其他常见文档格式中提取文本的开箱即用的支持。
|
||||
|
||||
**5. Agent 智能体**:
|
||||
您可以基于 LLM 函数调用或 ReAct 定义 Agent,并为 Agent 添加预构建或自定义工具。Dify 为 AI Agent 提供了50多种内置工具,如谷歌搜索、DALL·E、Stable Diffusion 和 WolframAlpha 等。
|
||||
|
||||
**6. LLMOps**:
|
||||
随时间监视和分析应用程序日志和性能。您可以根据生产数据和标注持续改进提示、数据集和模型。
|
||||
|
||||
**7. 后端即服务**:
|
||||
所有 Dify 的功能都带有相应的 API,因此您可以轻松地将 Dify 集成到自己的业务逻辑中。
|
||||
|
||||
|
||||
## 功能比较
|
||||
<table style="width: 100%;">
|
||||
<tr>
|
||||
<th align="center">功能</th>
|
||||
<th align="center">Dify.AI</th>
|
||||
<th align="center">LangChain</th>
|
||||
<th align="center">Flowise</th>
|
||||
<th align="center">OpenAI Assistant API</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">编程方法</td>
|
||||
<td align="center">API + 应用程序导向</td>
|
||||
<td align="center">Python 代码</td>
|
||||
<td align="center">应用程序导向</td>
|
||||
<td align="center">API 导向</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">支持的 LLMs</td>
|
||||
<td align="center">丰富多样</td>
|
||||
<td align="center">丰富多样</td>
|
||||
<td align="center">丰富多样</td>
|
||||
<td align="center">仅限 OpenAI</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">RAG引擎</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">Agent</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">❌</td>
|
||||
<td align="center">✅</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">工作流</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">❌</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">❌</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">可观测性</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">❌</td>
|
||||
<td align="center">❌</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">企业功能(SSO/访问控制)</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">❌</td>
|
||||
<td align="center">❌</td>
|
||||
<td align="center">❌</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center">本地部署</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">✅</td>
|
||||
<td align="center">❌</td>
|
||||
</tr>
|
||||
</table>
|
||||
19
apps/dify/data.yml
Normal file
@@ -0,0 +1,19 @@
|
||||
name: Dify
|
||||
tags:
|
||||
- AI / 大模型
|
||||
title: Dify 是一个开源的 LLM 应用开发平台
|
||||
description: Dify 是一个开源的 LLM 应用开发平台。支持 AI 工作流、RAG 管道、Agent、模型管理、可观测性等功能
|
||||
additionalProperties:
|
||||
key: dify
|
||||
name: Dify
|
||||
tags:
|
||||
- AI
|
||||
shortDescZh: Dify 是一个开源的 LLM 应用开发平台
|
||||
shortDescEn: Dify is an open-source LLM application development platform
|
||||
type: tool
|
||||
crossVersionUpdate: true
|
||||
limit: 0
|
||||
recommend: 0
|
||||
website: https://dify.ai/
|
||||
github: https://github.com/langgenius/dify/
|
||||
document: https://docs.dify.ai/
|
||||
BIN
apps/dify/logo.png
Normal file
|
After Width: | Height: | Size: 41 KiB |
82
apps/diun/README.md
Normal file
@@ -0,0 +1,82 @@
|
||||
# Diun
|
||||
|
||||
Diun 是一个用于监控 Docker 镜像更新并发送通知的命令行工具。
|
||||
|
||||
## 功能特性
|
||||
|
||||
- 自动监控 Docker 镜像更新
|
||||
- 支持多种通知方式(Discord、Telegram、Slack、邮件等)
|
||||
- 支持多种提供者(Docker、Kubernetes、Swarm、Nomad等)
|
||||
- 基于 Cron 表达式的调度
|
||||
- 支持多种架构(amd64、arm64、arm/v6、arm/v7等)
|
||||
- 轻量级设计,资源占用少
|
||||
|
||||
## 使用方法
|
||||
|
||||
1. 部署后 Diun 会自动开始监控 Docker 镜像
|
||||
2. 默认每6小时检查一次镜像更新
|
||||
3. 当检测到镜像更新时,会发送通知
|
||||
4. 通过 Docker 标签 `diun.enable=true` 控制哪些容器被监控
|
||||
|
||||
## 配置说明
|
||||
|
||||
- **时区**:Asia/Shanghai(上海时区)
|
||||
- **数据存储**:`./data` 目录包含 bbolt 数据库
|
||||
- **配置文件**:`./diun.yml` 包含所有监控和通知配置
|
||||
|
||||
## 默认配置
|
||||
|
||||
应用使用以下默认配置:
|
||||
|
||||
```yaml
|
||||
watch:
|
||||
workers: 20
|
||||
schedule: "0 */6 * * *"
|
||||
firstCheckNotif: false
|
||||
|
||||
providers:
|
||||
docker:
|
||||
watchByDefault: true
|
||||
```
|
||||
|
||||
## 自定义配置
|
||||
|
||||
如需自定义监控、通知等配置,请编辑 `diun.yml` 文件。参考官方文档进行配置:
|
||||
|
||||
- [配置概述](https://crazymax.dev/diun/configuration/overview/)
|
||||
- [通知配置](https://crazymax.dev/diun/notifications/)
|
||||
- [提供者配置](https://crazymax.dev/diun/providers/)
|
||||
|
||||
## 支持的提供者
|
||||
|
||||
- **Docker**:监控 Docker 容器和镜像
|
||||
- **Kubernetes**:监控 Kubernetes 集群
|
||||
- **Swarm**:监控 Docker Swarm 服务
|
||||
- **Nomad**:监控 HashiCorp Nomad 任务
|
||||
- **Dockerfile**:监控 Dockerfile 中的基础镜像
|
||||
- **File**:从文件读取镜像列表
|
||||
|
||||
## 支持的通知方式
|
||||
|
||||
- Discord、Telegram、Slack
|
||||
- 邮件、Matrix、MQTT
|
||||
- Pushover、Rocket.Chat
|
||||
- Webhook、Script、Signal
|
||||
- Gotify、Ntfy、Teams
|
||||
|
||||
## 监控配置
|
||||
|
||||
要监控特定的 Docker 容器,在容器标签中添加:
|
||||
|
||||
```yaml
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
- "diun.watch_repo=true" # 可选,监控仓库更新
|
||||
```
|
||||
|
||||
## 相关链接
|
||||
|
||||
- [官方网站](https://crazymax.dev/diun/)
|
||||
- [GitHub 项目](https://github.com/crazy-max/diun)
|
||||
- [Docker Hub](https://hub.docker.com/r/crazymax/diun/)
|
||||
- [基础示例](https://crazymax.dev/diun/usage/basic-example/)
|
||||
82
apps/diun/README_en.md
Normal file
@@ -0,0 +1,82 @@
|
||||
# Diun
|
||||
|
||||
Diun is a CLI tool to monitor Docker image updates and send notifications.
|
||||
|
||||
## Features
|
||||
|
||||
- Automatically monitor Docker image updates
|
||||
- Support multiple notification methods (Discord, Telegram, Slack, Email, etc.)
|
||||
- Support multiple providers (Docker, Kubernetes, Swarm, Nomad, etc.)
|
||||
- Cron-based scheduling
|
||||
- Support multiple architectures (amd64, arm64, arm/v6, arm/v7, etc.)
|
||||
- Lightweight design with low resource usage
|
||||
|
||||
## Usage
|
||||
|
||||
1. After deployment, Diun will automatically start monitoring Docker images
|
||||
2. Default check interval is every 6 hours
|
||||
3. When image updates are detected, notifications will be sent
|
||||
4. Control which containers are monitored via Docker label `diun.enable=true`
|
||||
|
||||
## Configuration
|
||||
|
||||
- **Timezone**: Asia/Shanghai (Shanghai timezone)
|
||||
- **Data Storage**: `./data` directory contains bbolt database
|
||||
- **Config File**: `./diun.yml` contains all monitoring and notification configurations
|
||||
|
||||
## Default Configuration
|
||||
|
||||
The application uses the following default configuration:
|
||||
|
||||
```yaml
|
||||
watch:
|
||||
workers: 20
|
||||
schedule: "0 */6 * * *"
|
||||
firstCheckNotif: false
|
||||
|
||||
providers:
|
||||
docker:
|
||||
watchByDefault: true
|
||||
```
|
||||
|
||||
## Custom Configuration
|
||||
|
||||
To customize monitoring, notifications, and other configurations, please edit the `diun.yml` file. Refer to the official documentation for configuration:
|
||||
|
||||
- [Configuration Overview](https://crazymax.dev/diun/configuration/overview/)
|
||||
- [Notifications](https://crazymax.dev/diun/notifications/)
|
||||
- [Providers](https://crazymax.dev/diun/providers/)
|
||||
|
||||
## Supported Providers
|
||||
|
||||
- **Docker**: Monitor Docker containers and images
|
||||
- **Kubernetes**: Monitor Kubernetes clusters
|
||||
- **Swarm**: Monitor Docker Swarm services
|
||||
- **Nomad**: Monitor HashiCorp Nomad tasks
|
||||
- **Dockerfile**: Monitor base images in Dockerfiles
|
||||
- **File**: Read image list from files
|
||||
|
||||
## Supported Notifications
|
||||
|
||||
- Discord, Telegram, Slack
|
||||
- Email, Matrix, MQTT
|
||||
- Pushover, Rocket.Chat
|
||||
- Webhook, Script, Signal
|
||||
- Gotify, Ntfy, Teams
|
||||
|
||||
## Monitoring Configuration
|
||||
|
||||
To monitor specific Docker containers, add these labels:
|
||||
|
||||
```yaml
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
- "diun.watch_repo=true" # Optional, monitor repository updates
|
||||
```
|
||||
|
||||
## Links
|
||||
|
||||
- [Official Website](https://crazymax.dev/diun/)
|
||||
- [GitHub Project](https://github.com/crazy-max/diun)
|
||||
- [Docker Hub](https://hub.docker.com/r/crazymax/diun/)
|
||||
- [Basic Example](https://crazymax.dev/diun/usage/basic-example/)
|
||||