Guide to Deploying Large AI Models on an Ubuntu Server


1. Download Ollama

Online installation:

On Linux, run the command: curl -fsSL https://ollama.com/install.sh | sh
Because this download goes over an external network, the connection tends to be unstable and frequently times out.
Offline installation:

Step 1: Download the offline Ollama release
On the Linux server, run lscpu to check the server's CPU architecture.
Then download the matching offline Ollama release from:
https://github.com/ollama/ollama/releases
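For example, a minimal sketch of this step; the version tag below is only a placeholder, and the asset name should be checked against the release you pick (newer releases may ship a .tgz instead of a raw binary):
# Check the CPU architecture (x86_64 -> amd64, aarch64 -> arm64)
lscpu | grep 'Architecture'
# Download the offline release asset into the same directory as install.sh.
# VERSION is a placeholder -- pick an actual tag from the releases page.
VERSION=v0.1.48
curl -fL -o ollama-linux-amd64 \
    "https://github.com/ollama/ollama/releases/download/${VERSION}/ollama-linux-amd64"
chmod +x ollama-linux-amd64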
Step 2: Download install.sh and modify it
The script is available at: https://ollama.com/install.sh
Modification 1:
Comment out the command that downloads ollama from the internet.
Modification 2:
Change the install source so that the offline ollama binary, placed in the same directory as install.sh, is installed instead.
Final modified version of install.sh:
#!/bin/sh
# This script installs Ollama on Linux.
# It detects the current operating system architecture and installs the appropriate version of Ollama.
set -eu
status() { echo ">>> $*" >&2; }
error() { echo "ERROR $*"; exit 1; }
warning() { echo "WARNING: $*"; }
TEMP_DIR=$(mktemp -d)
cleanup() { rm -rf $TEMP_DIR; }
trap cleanup EXIT
available() { command -v $1 >/dev/null; }
require() {
    local MISSING=''
    for TOOL in $*; do
        if ! available $TOOL; then
            MISSING="$MISSING $TOOL"
        fi
    done
    echo $MISSING
}
[ "$(uname -s)" = "Linux" ] || error 'This script is intended to run on Linux only.'
ARCH=$(uname -m)
case "$ARCH" in
    x86_64) ARCH="amd64" ;;
    aarch64|arm64) ARCH="arm64" ;;
    *) error "Unsupported architecture: $ARCH" ;;
esac
IS_WSL2=false
KERN=$(uname -r)
case "$KERN" in
    *icrosoft*WSL2 | *icrosoft*wsl2) IS_WSL2=true;;
    *icrosoft) error "Microsoft WSL1 is not currently supported. Please upgrade to WSL2 with 'wsl --set-version <distro> 2'" ;;
    *) ;;
esac
VER_PARAM="${OLLAMA_VERSION:+?version=$OLLAMA_VERSION}"
SUDO=
if [ "$(id -u)" -ne 0 ]; then
    # Not running as root; sudo is required for the privileged steps below.
    if ! available sudo; then
        error "This script requires superuser permissions. Please re-run as root."
    fi
    SUDO="sudo"
fi
NEEDS=$(require curl awk grep sed tee xargs)
if [ -n "$NEEDS" ]; then
    status "ERROR: The following tools are required but missing:"
    for NEED in $NEEDS; do
        echo "  - $NEED"
    done
    exit 1
fi
status "Downloading ollama..."
# curl --fail --show-error --location --progress-bar -o $TEMP_DIR/ollama "https://ollama.com/download/ollama-linux-${ARCH}${VER_PARAM}"
for BINDIR in /usr/local/bin /usr/bin /bin; do
    echo $PATH | grep -q $BINDIR && break || continue
done
status "Installing ollama to $BINDIR..."
$SUDO install -o0 -g0 -m755 -d $BINDIR
# $SUDO install -o0 -g0 -m755 $TEMP_DIR/ollama $BINDIR/ollama
$SUDO install -o0 -g0 -m755 ./ollama-linux-amd64 $BINDIR/ollama
install_success() {
    status 'The Ollama API is now available at 127.0.0.1:11434.'
    status 'Install complete. Run "ollama" from the command line.'
}
trap install_success EXIT
# Everything from this point onwards is optional.
configure_systemd() {
    if ! id ollama >/dev/null 2>&1; then
        status "Creating ollama user..."
        $SUDO useradd -r -s /bin/false -U -m -d /usr/share/ollama ollama
    fi
    if getent group render >/dev/null 2>&1; then
        status "Adding ollama user to render group..."
        $SUDO usermod -a -G render ollama
    fi
    if getent group video >/dev/null 2>&1; then
        status "Adding ollama user to video group..."
        $SUDO usermod -a -G video ollama
    fi
    status "Adding current user to ollama group..."
    $SUDO usermod -a -G ollama $(whoami)
    status "Creating ollama systemd service..."
    cat <<EOF | $SUDO tee /etc/systemd/system/ollama.service >/dev/null
[Unit]
Description=Ollama Service
After=network-online.target
[Service]
ExecStart=$BINDIR/ollama serve
User=ollama
Group=ollama
Restart=always
RestartSec=3
Environment="PATH=$PATH"
[Install]
WantedBy=default.target
EOF
    SYSTEMCTL_RUNNING="$(systemctl is-system-running || true)"
    case $SYSTEMCTL_RUNNING in
        running|degraded)
            status "Enabling and starting ollama service..."
            $SUDO systemctl daemon-reload
            $SUDO systemctl enable ollama
            start_service() { $SUDO systemctl restart ollama; }
            trap start_service EXIT
            ;;
    esac
}
if available systemctl; then
    configure_systemd
fi
# WSL2 only supports GPUs via nvidia passthrough
# so check for nvidia-smi to determine if GPU is available
if [ "$IS_WSL2" = true ]; then
    if available nvidia-smi && [ -n "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
        status "Nvidia GPU detected."
    fi
    install_success
    exit 0
fi
# Install GPU dependencies on Linux
if ! available lspci && ! available lshw; then
    warning "Unable to detect NVIDIA/AMD GPU. Install lspci or lshw to automatically detect and install GPU dependencies."
    exit 0
fi
check_gpu() {
    # Look for devices based on vendor ID for NVIDIA and AMD
    case $1 in
        lspci)
            case $2 in
                nvidia) available lspci && lspci -d '10de:' | grep -q 'NVIDIA' || return 1 ;;
                amdgpu) available lspci && lspci -d '1002:' | grep -q 'AMD' || return 1 ;;
            esac ;;
        lshw)
            case $2 in
                nvidia) available lshw && $SUDO lshw -c display -numeric | grep -q 'vendor: .* \[10DE\]' || return 1 ;;
                amdgpu) available lshw && $SUDO lshw -c display -numeric | grep -q 'vendor: .* \[1002\]' || return 1 ;;
            esac ;;
        nvidia-smi) available nvidia-smi || return 1 ;;
    esac
}
if check_gpu nvidia-smi; then
    status "NVIDIA GPU installed."
    exit 0
fi
if ! check_gpu lspci nvidia && ! check_gpu lshw nvidia && ! check_gpu lspci amdgpu && ! check_gpu lshw amdgpu; then
    install_success
    warning "No NVIDIA/AMD GPU detected. Ollama will run in CPU-only mode."
    exit 0
fi
if check_gpu lspci amdgpu || check_gpu lshw amdgpu; then
    # Look for pre-existing ROCm v6 before downloading the dependencies
    for search in "${HIP_PATH:-''}" "${ROCM_PATH:-''}" "/opt/rocm" "/usr/lib64"; do
        if [ -n "${search}" ] && [ -e "${search}/libhipblas.so.2" -o -e "${search}/lib/libhipblas.so.2" ]; then
            status "Compatible AMD GPU ROCm library detected at ${search}"
            install_success
            exit 0
        fi
    done
    status "Downloading AMD GPU dependencies..."
    $SUDO rm -rf /usr/share/ollama/lib
    $SUDO chmod o+x /usr/share/ollama
    $SUDO install -o ollama -g ollama -m 755 -d /usr/share/ollama/lib/rocm
    curl --fail --show-error --location --progress-bar "https://ollama.com/download/ollama-linux-amd64-rocm.tgz${VER_PARAM}" \
        | $SUDO tar zx --owner ollama --group ollama -C /usr/share/ollama/lib/rocm .
    install_success
    status "AMD GPU ready."
    exit 0
fi
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-7-centos-7
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-8-rocky-8
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#rhel-9-rocky-9
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#fedora
install_cuda_driver_yum() {
    status 'Installing NVIDIA repository...'
    case $PACKAGE_MANAGER in
        yum)
            $SUDO $PACKAGE_MANAGER -y install yum-utils
            $SUDO $PACKAGE_MANAGER-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-$1$2.repo
            ;;
        dnf)
            $SUDO $PACKAGE_MANAGER config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-$1$2.repo
            ;;
    esac
    case $1 in
        rhel)
            status 'Installing EPEL repository...'
            # EPEL is required for third-party dependencies such as dkms and libvdpau
            $SUDO $PACKAGE_MANAGER -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-$2.noarch.rpm || true
            ;;
    esac
    status 'Installing CUDA driver...'
    if [ "$1" = 'centos' ] || [ "$1$2" = 'rhel7' ]; then
        $SUDO $PACKAGE_MANAGER -y install nvidia-driver-latest-dkms
    fi
    $SUDO $PACKAGE_MANAGER -y install cuda-drivers
}
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#ubuntu
# ref: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#debian
install_cuda_driver_apt() {
    status 'Installing NVIDIA repository...'
    curl -fsSL -o $TEMP_DIR/cuda-keyring.deb https://developer.download.nvidia.com/compute/cuda/repos/$1$2/$(uname -m)/cuda-keyring_1.1-1_all.deb
    case $1 in
        debian)
            status 'Enabling contrib sources...'
            $SUDO sed 's/main/contrib/' < /etc/apt/sources.list | $SUDO tee /etc/apt/sources.list.d/contrib.list > /dev/null
            if [ -f "/etc/apt/sources.list.d/debian.sources" ]; then
                $SUDO sed 's/main/contrib/' < /etc/apt/sources.list.d/debian.sources | $SUDO tee /etc/apt/sources.list.d/contrib.sources > /dev/null
            fi
            ;;
    esac
    status 'Installing CUDA driver...'
    $SUDO dpkg -i $TEMP_DIR/cuda-keyring.deb
    $SUDO apt-get update
    [ -n "$SUDO" ] && SUDO_E="$SUDO -E" || SUDO_E=
    DEBIAN_FRONTEND=noninteractive $SUDO_E apt-get -y install cuda-drivers -q
}
if [ ! -f "/etc/os-release" ]; then
    error "Unknown distribution. Skipping CUDA installation."
fi
. /etc/os-release
OS_NAME=$ID
OS_VERSION=$VERSION_ID
PACKAGE_MANAGER=
for PACKAGE_MANAGER in dnf yum apt-get; do
    if available $PACKAGE_MANAGER; then
        break
    fi
done
if [ -z "$PACKAGE_MANAGER" ]; then
    error "Unknown package manager. Skipping CUDA installation."
fi
if ! check_gpu nvidia-smi || [ -z "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
    case $OS_NAME in
        centos|rhel) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -d '.' -f 1) ;;
        rocky) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -c1) ;;
        fedora) [ $OS_VERSION -lt '37' ] && install_cuda_driver_yum $OS_NAME $OS_VERSION || install_cuda_driver_yum $OS_NAME '37';;
        amzn) install_cuda_driver_yum 'fedora' '37' ;;
        debian) install_cuda_driver_apt $OS_NAME $OS_VERSION ;;
        ubuntu) install_cuda_driver_apt $OS_NAME $(echo $OS_VERSION | sed 's/\.//') ;;
        *) exit ;;
    esac
fi
if ! lsmod | grep -q nvidia || ! lsmod | grep -q nvidia_uvm; then
    KERNEL_RELEASE="$(uname -r)"
    case $OS_NAME in
        rocky) $SUDO $PACKAGE_MANAGER -y install kernel-devel kernel-headers ;;
        centos|rhel|amzn) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE kernel-headers-$KERNEL_RELEASE ;;
        fedora) $SUDO $PACKAGE_MANAGER -y install kernel-devel-$KERNEL_RELEASE ;;
        debian|ubuntu) $SUDO apt-get -y install linux-headers-$KERNEL_RELEASE ;;
        *) exit ;;
    esac
    NVIDIA_CUDA_VERSION=$($SUDO dkms status | awk -F: '/added/ { print $1 }')
    if [ -n "$NVIDIA_CUDA_VERSION" ]; then
        $SUDO dkms install $NVIDIA_CUDA_VERSION
    fi
    if lsmod | grep -q nouveau; then
        status 'Reboot to complete NVIDIA CUDA driver install.'
        exit 0
    fi
    $SUDO modprobe nvidia
    $SUDO modprobe nvidia_uvm
fi
# make sure the NVIDIA modules are loaded on boot with nvidia-persistenced
if command -v nvidia-persistenced > /dev/null 2>&1; then
    $SUDO touch /etc/modules-load.d/nvidia.conf
    MODULES="nvidia nvidia-uvm"
    for MODULE in $MODULES; do
        if ! grep -qxF "$MODULE" /etc/modules-load.d/nvidia.conf; then
            echo "$MODULE" | $SUDO tee -a /etc/modules-load.d/nvidia.conf > /dev/null
        fi
    done
fi
status "NVIDIA GPU ready."
install_success
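Place the binary from Step 1 in the same directory as the modified install.sh, named ollama-linux-amd64 as the script expects, and run the installer (it calls sudo itself where needed):
chmod +x install.sh    # make the modified script executable
sh ./install.sh        # installs ./ollama-linux-amd64 into $BINDIR (typically /usr/local/bin) and sets up the ollama systemd service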
If the installer finishes with 'The Ollama API is now available at 127.0.0.1:11434.' and 'Install complete. Run "ollama" from the command line.', Ollama has been installed successfully.
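A quick way to verify the installation (standard commands; the API address is the one printed by the script's install_success message):
ollama --version               # the installed CLI reports its version
systemctl status ollama        # the systemd service created by install.sh
curl http://127.0.0.1:11434    # the API should answer with "Ollama is running"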
2. Start Nginx and Deploy the Vue Frontend

Start nginx: systemctl start nginx.service
Check nginx status: systemctl status nginx.service
Stop nginx: systemctl stop nginx.service
Edit the site (sub-)configuration file, since that is where the http/server content is written.
The nginx site configurations live in /etc/nginx/sites-available.
Go into that directory and edit the default file: vim default
        index index.html index.htm index.nginx-debian.html;

        location / {
                # First attempt to serve request as file, then
                # as directory, then fall back to displaying a 404.
                try_files $uri $uri/ @router;
        }

        location @router {
                rewrite ^.*$ /index.html last;
        }
If your frontend is built with Vue and uses vue-router, you need this configuration; otherwise router navigation will produce 404 errors, for example when a route is refreshed or opened directly.
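As a rough sketch of deploying the built frontend and applying the nginx changes (dist/ and /var/www/html are assumed paths for illustration; use the root directory your default config actually points to):
sudo cp -r dist/* /var/www/html/     # copy the Vue build output to the nginx web root (assumed path)
sudo nginx -t                        # validate the edited configuration
sudo systemctl reload nginx.service  # reload nginx so the new config takes effect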
3. Run the Python Scripts

Go to the directory containing the Python scripts and run: python xxx.py. When a script starts, the system may report that some modules are not installed; install them as prompted.
Command: pip install module_name
Note that some of the missing-module messages are misleading. For example:
ModuleNotFoundError: No module named 'docx'
If this error appears, do not install a package named docx; install python-docx instead.
After all required libraries are installed, go to the directory holding the Python scripts and start the entry script. To run it in the foreground: python ai_analysis.py
To keep it running in the background long term:
nohup python ai_analysis.py > /opt/app/llm_python/ai_analysis_project/log 2>&1 &
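To check that the background process stays alive and to follow its output (the log path is the one used in the command above):
ps -ef | grep ai_analysis.py | grep -v grep            # the process should still be listed
tail -f /opt/app/llm_python/ai_analysis_project/log    # follow the script's output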
4. Libraries Currently Required by the Project

Libraries required by MiniCPM, with the versions used in the official test environment:
Pillow==10.1.0
torch==2.1.2 (previously installed version: 1.13.0)
torchvision==0.16.2 (previously installed version: 0.17.1)
transformers==4.40.0
sentencepiece==0.1.99
accelerate==0.30.1
bitsandbytes==0.43.1
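A one-line way to install that environment (pins copied from the list above; pick torch/torchvision builds that match your CUDA version if they differ):
pip install Pillow==10.1.0 torch==2.1.2 torchvision==0.16.2 transformers==4.40.0 sentencepiece==0.1.99 accelerate==0.30.1 bitsandbytes==0.43.1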
Libraries needed for AI analysis:
langchain
langchain_community
Libraries needed for document analysis:
pandasai
python-docx
fitz (the fitz module is provided by PyMuPDF; install it with pip install PyMuPDF rather than pip install fitz)
faiss-gpu (conda install faiss-gpu -c pytorch)
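A sketch of installing this group in one go (versions unpinned; faiss-gpu comes from conda as noted above):
pip install langchain langchain_community pandasai python-docx PyMuPDF   # PyMuPDF provides the fitz module
conda install -c pytorch faiss-gpu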
