
Prerequisite: Git (download link: https://git-scm.com/downloads/win)

One-click launcher design (using gpt_academic as an example)

Reference project: https://github.com/binary-husky/gpt_academic

Installation Method 0: one-click launch script

The one-click package consists of an OS-specific bootstrap script (a Windows .bat or a macOS .sh) that silently installs a private Miniconda and creates an isolated environment, plus a cross-platform Python launch script (pip_setup.py) that fetches the project code, installs its dependencies, and starts the program.

Windows launch script

@echo off
setlocal

@REM Set the console code page to UTF-8 so non-ASCII text displays correctly
chcp 65001 1>nul
@REM Switch to the directory this script lives in
cd /D "%~dp0"
@REM Make sure system32 is on the PATH
set PATH=%PATH%;%SystemRoot%\system32
@REM Abort if the current directory path contains spaces
echo "%CD%"| findstr /C:" " >nul && echo This script relies on Miniconda, which cannot be silently installed under a path containing spaces. && goto end

@REM Temp-file directory and Miniconda install locations
set TMP=%cd%\installer_files
set TEMP=%cd%\installer_files
set INSTALL_DIR=%cd%\installer_files
set CONDA_ROOT_PREFIX=%cd%\installer_files\conda
set INSTALL_ENV_DIR=%cd%\installer_files\env
@REM Download URL of the Miniconda installer
set MINICONDA_DOWNLOAD_URL=https://repo.anaconda.com/miniconda/Miniconda3-py312_24.1.2-0-Windows-x86_64.exe
set conda_exists=F

@REM Check whether Miniconda is already installed
call "%CONDA_ROOT_PREFIX%\_conda.exe" --version >nul 2>&1
if "%ERRORLEVEL%" EQU "0" set conda_exists=T
@REM If Miniconda is missing, download and install it
if "%conda_exists%" == "F" (
    echo Downloading Miniconda from %MINICONDA_DOWNLOAD_URL% to %INSTALL_DIR%\miniconda_installer.exe
    mkdir "%INSTALL_DIR%"
    call curl -Lk "%MINICONDA_DOWNLOAD_URL%" > "%INSTALL_DIR%\miniconda_installer.exe" || ( echo. && echo Miniconda download failed. && goto end )
    echo Installing Miniconda to %CONDA_ROOT_PREFIX%
    start /wait "" "%INSTALL_DIR%\miniconda_installer.exe" /InstallationType=JustMe /NoShortcuts=1 /AddToPath=0 /RegisterPython=0 /NoRegistry=1 /S /D=%CONDA_ROOT_PREFIX%
    echo Miniconda version:
    call "%CONDA_ROOT_PREFIX%\_conda.exe" --version || ( echo. && echo Miniconda not found. && goto end )
)

@REM Create the conda environment if it does not exist yet; abort if creation fails
if not exist "%INSTALL_ENV_DIR%" (
    echo Packages to install: python=3.10 requests
    call "%CONDA_ROOT_PREFIX%\_conda.exe" create --no-shortcuts -y -k --prefix "%INSTALL_ENV_DIR%" python=3.10 requests || ( echo. && echo Failed to create the conda environment. && goto end )
)
)
@REM Verify that the conda environment was actually created
if not exist "%INSTALL_ENV_DIR%\python.exe" ( echo. && echo Conda environment is empty. && goto end )

@REM Environment isolation: block user site-packages, clear PYTHONPATH and PYTHONHOME, and point the CUDA paths at the env
set PYTHONNOUSERSITE=1
set PYTHONPATH=
set PYTHONHOME=
set "CUDA_PATH=%INSTALL_ENV_DIR%"
set "CUDA_HOME=%CUDA_PATH%"
@REM Activate the conda environment created above
call "%CONDA_ROOT_PREFIX%\condabin\conda.bat" activate "%INSTALL_ENV_DIR%" || ( echo. && echo Miniconda hook not found. && goto end )

echo.
echo Setup complete!

@rem Run the cross-platform setup script (the Python launch script shown below) inside the env
call python pip_setup.py

@REM The :end label is the target of the error branches above; pause so the output stays visible
:end
pause
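
@REM Illustrative layout (assumption): save this file next to pip_setup.py, the Python
@REM launch script shown below. Double-clicking it bootstraps a private Miniconda into
@REM .\installer_files and then runs pip_setup.py inside the freshly created environment.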

macOS launch script

#!/bin/bash

cd "$(dirname "${BASH_SOURCE[0]}")"

if [[ "$(pwd)" =~ " " ]]; then echo This script relies on Miniconda which can not be silently installed under a path with spaces. && exit; fi

# Apple silicon (arm64) or Intel (x86_64)?
OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
    x86_64*)    OS_ARCH="x86_64";;
    arm64*)     OS_ARCH="arm64";;
    *)          echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit;;
esac

# config
INSTALL_DIR="$(pwd)/installer_files"
CONDA_ROOT_PREFIX="$(pwd)/installer_files/conda"
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
MINICONDA_DOWNLOAD_URL="https://repo.anaconda.com/miniconda/Miniconda3-py310_23.1.0-1-MacOSX-${OS_ARCH}.sh"
conda_exists="F"

# figure out whether conda needs to be installed
if "$CONDA_ROOT_PREFIX/bin/conda" --version &>/dev/null; then conda_exists="T"; fi

# (if necessary) download Miniconda and install it into a self-contained directory
if [ "$conda_exists" == "F" ]; then
    echo "Downloading Miniconda from $MINICONDA_DOWNLOAD_URL to $INSTALL_DIR/miniconda_installer.sh"

    mkdir -p "$INSTALL_DIR"
    curl -Lk "$MINICONDA_DOWNLOAD_URL" > "$INSTALL_DIR/miniconda_installer.sh"

    chmod u+x "$INSTALL_DIR/miniconda_installer.sh"
    bash "$INSTALL_DIR/miniconda_installer.sh" -b -p $CONDA_ROOT_PREFIX

    # test the conda binary
    echo "Miniconda version:"
    "$CONDA_ROOT_PREFIX/bin/conda" --version
fi

# create the installer env
if [ ! -e "$INSTALL_ENV_DIR" ]; then
    "$CONDA_ROOT_PREFIX/bin/conda" create -y -k --prefix "$INSTALL_ENV_DIR" python=3.10 requests
fi

# check if conda environment was actually created
if [ ! -e "$INSTALL_ENV_DIR/bin/python" ]; then
    echo "Conda environment is empty."
    exit
fi

# environment isolation
export PYTHONNOUSERSITE=1
unset PYTHONPATH
unset PYTHONHOME
export CUDA_PATH="$INSTALL_ENV_DIR"
export CUDA_HOME="$CUDA_PATH"

# activate installer env
source "$CONDA_ROOT_PREFIX/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
conda activate "$INSTALL_ENV_DIR"

# setup installer env
python pip_setup.py

echo
echo "Done!"

Python launch script (pip_setup.py)

# https://github.com/oobabooga/one-click-installers

# Imports
import requests   # HTTP download of the project archive
import os         # filesystem and environment access
import subprocess # running shell commands
import zipfile    # extracting the downloaded ZIP archive
import sys        # interpreter interaction and exiting

# Current working directory; the bootstrap scripts above cd into their own
# directory first, so this is where installer_files/ lives
script_dir = os.getcwd()

# Run a shell command, optionally inside the bundled conda environment,
# and optionally assert that it succeeded
def run_cmd(cmd, assert_success=False, environment=False, capture_output=False, env=None):
    # If requested, prefix the command with a conda activation step
    if environment:
        conda_env_path = os.path.join(script_dir, "installer_files", "env")  # conda env prefix
        if sys.platform.startswith("win"):  # Windows
            conda_bat_path = os.path.join(script_dir, "installer_files", "conda", "condabin", "conda.bat")  # conda.bat on Windows
            cmd = "\"" + conda_bat_path + "\" activate \"" + conda_env_path + "\" >nul && " + cmd  # activate the env, then run cmd
        else:  # Linux or macOS
            conda_sh_path = os.path.join(script_dir, "installer_files", "conda", "etc", "profile.d", "conda.sh")  # conda.sh on Linux/macOS
            cmd = ". \"" + conda_sh_path + "\" && conda activate \"" + conda_env_path + "\" && " + cmd  # source the hook, activate the env, run cmd

    # Run the command, optionally capturing its output
    result = subprocess.run(cmd, shell=True, capture_output=capture_output, env=env)

    # Optionally treat a non-zero exit status as fatal
    if assert_success and result.returncode != 0:
        print("Command '" + cmd + "' failed with exit status code '" + str(result.returncode) + "'. Exiting...")
        sys.exit()
    return result  # the CompletedProcess object
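
# Illustrative usage of run_cmd (comments only, not part of the launcher): run a command
# inside the bundled conda environment and abort the whole setup if it fails:
#   run_cmd("python -m pip --version", assert_success=True, environment=True)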


# Check that conda is available and that the base environment is not active
def check_env():
    # Is conda callable at all?
    conda_exist = run_cmd("conda", environment=True, capture_output=True).returncode == 0
    if not conda_exist:
        print("Conda is not installed. Exiting...")
        sys.exit()

    # Refuse to run inside the base environment
    if os.environ.get("CONDA_DEFAULT_ENV", "") == "base":  # .get avoids a KeyError when no env is active
        print("Create an environment for this project and activate it. Exiting...")
        sys.exit()
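
# Note (illustrative): `conda activate` is what sets CONDA_DEFAULT_ENV. When this script
# is started by the bootstrap scripts above, the active environment is installer_files/env
# rather than base, so both checks pass.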


# Install the project's dependencies, letting the user pick a pip index
def install_dependencies():
    # Ask which package index to use
    print('Choose how the dependencies should be installed:\n')
    print('1: official PyPI index\n')
    print('2: Aliyun mirror\n')
    try:
        choice = int(input('\nType 1 or 2, then press Enter: '))
    except ValueError:
        choice = 1  # default to PyPI on invalid input

    # Run pip against the chosen index (anything other than 2 falls back to PyPI)
    if choice == 2:
        run_cmd("python -m pip install -r requirements.txt --upgrade -i https://mirrors.aliyun.com/pypi/simple/", assert_success=True, environment=True)
    else:
        run_cmd("python -m pip install -r requirements.txt --upgrade -i https://pypi.org/simple/", assert_success=True, environment=True)


# Message shown when everything is in place
ready_msg = """

Everything is ready!

- (Important) Make sure the options in gpt_academic/config.py are configured correctly.
- (Important) For how to configure the project, see `https://github.com/binary-husky/gpt_academic/wiki`.
- Close this window to quit the program; to run it again, launch `Windows双击这里运行.bat` (or `MacOS点击这里运行.sh`) as before.
- To uninstall, simply delete the whole folder.

When you are ready, press Enter to continue.
"""

# Launch the application
def run_model():
    input(ready_msg)  # show the ready message and wait for confirmation
    run_cmd("python main.py", environment=True)  # start the project's main program


# Entry point
if __name__ == "__main__":
    # Fetch the project code if it is not present yet
    if not os.path.exists('./gpt_academic/main.py'):
        print('Fetching the code from GitHub...')
        # Ask how the code should be obtained
        print('Choose how to obtain the code:\n')
        print('1: automatic: download from a GitHub mirror (may not be the latest version)\n')
        print('2: automatic: download from GitHub directly (may run into the well-known connectivity problem)\n')
        print('3: manual: download `https://github.com/binary-husky/gpt_academic/archive/refs/heads/master.zip` from GitHub')
        print('\t\tand extract the `gpt_academic-master` folder from the archive into the current directory.')
        print('\t\tMore concretely:')
        print('\t\t\tdrag the `gpt_academic-master` folder from the archive')
        print(f'\t\t\tinto `{os.getcwd()}`,')
        print(f'\t\t\tso that `{os.path.join(os.getcwd(), "gpt_academic-master")}` exists,')
        print('\t\t\tthen type 3 and press Enter.')

        choice = int(input('\n\nType 1, 2 or 3, then press Enter: '))

        # Download or extract the code according to the chosen method
        if choice == 1:
            r = requests.get('https://public.agent-matrix.com/publish/master.zip', stream=True)  # download the archive from the mirror
            zip_file_path = './master.zip'
            with open(zip_file_path, 'wb+') as f:
                cnt = 0
                for chunk in r.iter_content(chunk_size=4096):
                    if chunk:
                        f.write(chunk)  # write the chunk to disk
                        cnt += 1
                        if cnt % 20 == 0: print('.', end='', flush=True)  # print a progress dot every 20 chunks
            print('Download finished')
            with zipfile.ZipFile(zip_file_path, 'r') as zipobj:
                zipobj.extractall(path='./')  # extract into the current directory
                print("Extraction finished")
            while True:
                if os.path.exists('./gpt_academic-master/main.py'):  # did extraction produce the expected folder?
                    os.rename('gpt_academic-master', 'gpt_academic')  # rename it to gpt_academic
                    break
                input('gpt_academic-master folder not found yet; try method 3, or press Enter to retry.')

        elif choice == 2:
            run_cmd("git clone --depth=1 https://github.com/binary-husky/gpt_academic.git", assert_success=True, environment=True)  # clone the project with git

        elif choice == 3:
            while True:
                if os.path.exists('./gpt_academic-master/main.py'):  # manually extracted folder present?
                    os.rename('gpt_academic-master', 'gpt_academic')  # rename it to gpt_academic
                    break
                input('gpt_academic-master folder not found yet; press Enter to retry.')
        else:
            assert False, 'Unknown option'  # invalid input

    # Check the conda environment
    check_env()

    # With the code in place, enter the project directory, install dependencies, and launch
    if os.path.exists("gpt_academic/"):
        os.chdir("gpt_academic")  # switch into the project directory
        install_dependencies()    # install the project's dependencies
        run_model()               # launch the application

Installation Method I: run directly (Windows, Linux, or macOS)

  1. Download the project

    git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
    cd gpt_academic
    
  2. Configure API_KEY and other variables. In config.py, set the API KEY and the other options; for special network environments, see the Wiki page on project configuration (Wiki-项目配置说明). Note: the program first checks for a private configuration file named config_private.py and uses its entries to override the same-named entries in config.py. If you understand this loading logic, we strongly recommend creating a config_private.py next to config.py and putting all your settings there, so that they are not lost during automatic updates. The project can also be configured via environment variables; for the exact format, see the docker-compose.yml file or our Wiki page. Configuration priority: environment variables > config_private.py > config.py. A minimal config_private.py sketch is given after this list.

  3. Install the dependencies

    # (Option I: if you are familiar with Python; recommended Python version 3.9 ~ 3.11)
    # Note: use the official PyPI index or the Aliyun mirror; to switch the index temporarily:
    #   python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
    python -m pip install -r requirements.txt
    
    # (Option II: using Anaconda) the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
    conda create -n gptac_venv python=3.11    # create the anaconda environment
    conda activate gptac_venv                 # activate it
    python -m pip install -r requirements.txt # then the same pip step as above
    
  4. Run

    python main.py
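
A minimal config_private.py sketch (illustrative; the option names below are the same ones used in the docker-compose examples further down this page, and the key value is a placeholder rather than a real credential):

    # config_private.py -- placed next to config.py; same-named entries override config.py
    API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"  # placeholder key
    USE_PROXY = True
    proxies = {"http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880"}
    WEB_PORT = 12345  # port of the web UI

The same options can instead be supplied as environment variables (as in the docker-compose files below), and environment variables take priority over both configuration files.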
    

Installation Method II: using Docker

Pick one of the schemes below, edit docker-compose.yml so that only that scheme remains (delete the others), and then run `docker-compose up`:

- Scheme 0: the project's full capability (a large image bundling CUDA and LaTeX; not recommended if your connection is slow or your disk is small).
- Scheme 1 (recommended for most users): only online models such as ChatGPT, GLM4, 文心一言, and Spark. P.S. if you need the LaTeX plugin features, see the Wiki; alternatively, scheme 4 or scheme 0 also provide LaTeX support.
- Scheme 2: ChatGPT + GLM3 + MOSS + LLAMA2 + 通义千问 local models (requires familiarity with the Nvidia Docker runtime).

## ===================================================
# docker-compose.yml
## ===================================================
# 1. Choose exactly one of the schemes below and delete the others.
# 2. Adjust the environment variables of the chosen scheme; see the GitHub wiki or config.py for details.
# 3. Choose one way to expose the service port and adjust the configuration accordingly:
    # [Method 1: Linux only, very convenient, but not supported on Windows] share the host's network; this is the default configuration
    # network_mode: "host"
    # [Method 2: all systems, including Windows and macOS] port mapping; map the container port to a host port (delete network_mode: "host" first, then add the following)
    # ports:
    #   - "12345:12345"  # note: 12345 must match the WEB_PORT environment variable
# 4. Finally, run `docker-compose up`.
# 5. To use a GPU, pay attention to the LOCAL_MODEL_DEVICE and Nvidia runtime options.
## ===================================================
## [Scheme 0] The project's full capability (a large image bundling CUDA and LaTeX; not recommended if your connection is slow, your disk is small, or you have no GPU)
## ===================================================
version: '3'
services:
  gpt_academic_full_capability:
    image: ghcr.io/binary-husky/gpt_academic_with_all_capacity:master
    environment:
      # See `config.py` or the GitHub wiki for the full list of options
      API_KEY: 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
      LLM_MODEL: 'gpt-3.5-turbo'
      AVAIL_LLM_MODELS: '["gpt-3.5-turbo", "gpt-4", "qianfan", "sparkv2", "spark", "chatglm"]'
      BAIDU_CLOUD_API_KEY: 'xxxxxxxxxxxxxxxxxxxxxxxx'
      BAIDU_CLOUD_SECRET_KEY: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
      XFYUN_APPID: 'xxxxxxxx'
      XFYUN_API_SECRET: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
      XFYUN_API_KEY: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
      ENABLE_AUDIO: 'False'
      DEFAULT_WORKER_NUM: '20'
      WEB_PORT: '12345'
      ADD_WAIFU: 'False'
      ALIYUN_APPKEY: 'xxxxxxxxxxxxxxxx'
      THEME: 'Chuanhu-Small-and-Beautiful'
      ALIYUN_ACCESSKEY: 'xxxxxxxxxxxxxxxxxxxxxxxx'
      ALIYUN_SECRET: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
      # LOCAL_MODEL_DEVICE: 'cuda'

    # Enable the Nvidia GPU runtime
    # runtime: nvidia
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]

    # [WEB_PORT exposure method 1: Linux only] share the host's network
    network_mode: "host"

    # [WEB_PORT exposure method 2: all systems] port mapping
    # ports:
    #   - "12345:12345"  # 12345 must match WEB_PORT

    # After the container starts, run the main program main.py
    command: >
      bash -c "python3 -u main.py"

## ===================================================
## [Scheme 1] If no local models are needed (only online LLM services such as chatgpt, azure, 星火, 千帆, and claude)
## ===================================================
version: '3'
services:
  gpt_academic_nolocalllms:
    image: ghcr.io/binary-husky/gpt_academic_nolocal:master # (Auto Built by Dockerfile: docs/GithubAction+NoLocal)
    environment:
      # See `config.py` for the full list of options
      API_KEY: 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
      USE_PROXY: 'True'
      proxies: '{ "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", }'
      LLM_MODEL: 'gpt-3.5-turbo'
      AVAIL_LLM_MODELS: '["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "sparkv2", "qianfan"]'
      WEB_PORT: '22303'
      ADD_WAIFU: 'True'

    # [WEB_PORT exposure method 1: Linux only] share the host's network
    network_mode: "host"

    # Launch command
    command: >
      bash -c "python3 -u main.py"

## ===================================================
## [Scheme 2] If local models such as ChatGLM, Qwen, and MOSS are needed
## ===================================================
version: '3'
services:
  gpt_academic_with_chatglm:
    image: ghcr.io/binary-husky/gpt_academic_chatglm_moss:master  # (Auto Built by Dockerfile: docs/Dockerfile+ChatGLM)
    environment:
      # See `config.py` for the full list of options
      API_KEY: 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
      USE_PROXY: 'True'
      proxies: '{ "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", }'
      LLM_MODEL: 'gpt-3.5-turbo'
      AVAIL_LLM_MODELS: '["chatglm", "qwen", "moss", "gpt-3.5-turbo", "gpt-4", "newbing"]'
      LOCAL_MODEL_DEVICE: 'cuda'
      DEFAULT_WORKER_NUM: '10'
      WEB_PORT: '12303'
      ADD_WAIFU: 'True'

    # GPU access; nvidia0 refers to GPU 0
    runtime: nvidia
    devices:
      - /dev/nvidia0:/dev/nvidia0

    # [WEB_PORT exposure method 1: Linux only] share the host's network
    network_mode: "host"

    # Launch command
    command: >
      bash -c "python3 -u main.py"

    # P.S. By slightly adjusting `command`, extra dependencies can be installed conveniently:
    # command: >
    #   bash -c "pip install -r request_llms/requirements_qwen.txt && python3 -u main.py"

## ===================================================
## [Scheme 3] If ChatGPT + LLAMA + 盘古 + RWKV local models are needed
## ===================================================
version: '3'
services:
  gpt_academic_with_rwkv:
    image: ghcr.io/binary-husky/gpt_academic_jittorllms:master
    environment:
      # See `config.py` for the full list of options
      API_KEY: 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
      USE_PROXY: 'True'
      proxies: '{ "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", }'
      LLM_MODEL: 'gpt-3.5-turbo'
      AVAIL_LLM_MODELS: '["gpt-3.5-turbo", "newbing", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]'
      LOCAL_MODEL_DEVICE: 'cuda'
      DEFAULT_WORKER_NUM: '10'
      WEB_PORT: '12305'
      ADD_WAIFU: 'True'

    # GPU access; nvidia0 refers to GPU 0
    runtime: nvidia
    devices:
      - /dev/nvidia0:/dev/nvidia0

    # [WEB_PORT exposure method 1: Linux only] share the host's network
    network_mode: "host"

    # Launch command
    command: >
      bash -c "python3 -u main.py"

## ===================================================
## [Scheme 4] ChatGPT + LaTeX
## ===================================================
version: '3'
services:
  gpt_academic_with_latex:
    image: ghcr.io/binary-husky/gpt_academic_with_latex:master  # (Auto Built by Dockerfile: docs/GithubAction+NoLocal+Latex)
    environment:
      # See `config.py` for the full list of options
      API_KEY: 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
      USE_PROXY: 'True'
      proxies: '{ "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", }'
      LLM_MODEL: 'gpt-3.5-turbo'
      AVAIL_LLM_MODELS: '["gpt-3.5-turbo", "gpt-4"]'
      LOCAL_MODEL_DEVICE: 'cuda'
      DEFAULT_WORKER_NUM: '10'
      WEB_PORT: '12303'
      ADD_WAIFU: 'True'

    # [WEB_PORT exposure method 1: Linux only] share the host's network
    network_mode: "host"

    # Launch command
    command: >
      bash -c "python3 -u main.py"

## ===================================================
## [Scheme 5] ChatGPT + voice assistant (please read docs/use_audio.md first)
## ===================================================
version: '3'
services:
  gpt_academic_with_audio:
    image: ghcr.io/binary-husky/gpt_academic_audio_assistant:master
    environment:
      # See `config.py` for the full list of options
      API_KEY: 'fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
      USE_PROXY: 'False'
      proxies: 'None'
      LLM_MODEL: 'gpt-3.5-turbo'
      AVAIL_LLM_MODELS: '["gpt-3.5-turbo", "gpt-4"]'
      ENABLE_AUDIO: 'True'
      LOCAL_MODEL_DEVICE: 'cuda'
      DEFAULT_WORKER_NUM: '20'
      WEB_PORT: '12343'
      ADD_WAIFU: 'True'
      THEME: 'Chuanhu-Small-and-Beautiful'
      ALIYUN_APPKEY: 'xxxxxxxxxxxxxxxx'
      ALIYUN_TOKEN: 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'

    # [WEB_PORT exposure method 1: Linux only] share the host's network
    network_mode: "host"

    # Launch command
    command: >
      bash -c "python3 -u main.py"