准备
按照官方文档，7B 的模型最少需要 11.2G 显存；由于硬件限制，本文采用 7B 模型。
安装conda
# Download and run the Anaconda installer.
# Fetch the latest installer from the Tsinghua mirror (faster inside China).
installer=Anaconda3-2023.03-1-Linux-x86_64.sh
wget "https://mirrors.tuna.tsinghua.edu.cn/anaconda/archive/${installer}"
# Run the file we just downloaded (original text ran a different, older
# filename than it downloaded — that would fail with "No such file").
# Install directory: /home/ubuntu/anaconda3
bash "${installer}"
# Add conda to PATH: append the export line below to the end of ~/.bashrc.
# (No sudo needed — ~/.bashrc belongs to the current user.)
nano ~/.bashrc
export PATH="/home/ubuntu/anaconda3/bin:$PATH"
# Save and exit, then reload the shell configuration to apply it.
source ~/.bashrc
# Verify the installation.
conda --version
获取LLaMA
参考文章 https://zhuanlan.zhihu.com/p/623769040
# Install git-lfs first (required to pull the large model weight files).
# NOTE(review): piping a remote script straight into `sudo bash` is a
# security risk — inspect the script first in sensitive environments.
curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash
sudo apt-get install git-lfs
git lfs install
# Clone the LLaMA-7B weights from Hugging Face (repo is ~13 GB; the
# download is very slow).
# NOTE(review): this community mirror may have been taken down since the
# article was written — confirm the repo still exists before relying on it.
git clone https://huggingface.co/decapoda-research/llama-7b-hf
获取Vicuna增量文件
# Clone the Vicuna-7B delta weights — MiniGPT-4 requires the v0 release.
# The repository is roughly 13 GB.
git clone https://huggingface.co/lmsys/vicuna-7b-delta-v0
生成Vicuna模型
# --- Dependency 1: CUDA toolkit ---
# https://developer.nvidia.com/cuda-downloads
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin
sudo mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600
wget https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda-repo-ubuntu2004-12-1-local_12.1.1-530.30.02-1_amd64.deb
sudo dpkg -i cuda-repo-ubuntu2004-12-1-local_12.1.1-530.30.02-1_amd64.deb
sudo cp /var/cuda-repo-ubuntu2004-12-1-local/cuda-*-keyring.gpg /usr/share/keyrings/
sudo apt-get update
sudo apt-get -y install cuda
# Environment variables: append the three export lines below to ~/.bashrc.
# (No sudo needed — ~/.bashrc belongs to the current user.)
nano ~/.bashrc
# user-add: cuda
export PATH=/usr/local/cuda-12/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda-12/lib64:$LD_LIBRARY_PATH
export CUDA_HOME=/usr/local/cuda
# Reload the shell configuration to apply the changes.
source ~/.bashrc
# Verify the installation — nvcc printing its version means success.
nvcc --version
# Expected output (kept as comments so it is not executed by mistake):
#   nvcc: NVIDIA (R) Cuda compiler driver
#   Copyright (c) 2005-2023 NVIDIA Corporation
#   Built on Mon_Apr__3_17:16:06_PDT_2023
#   Cuda compilation tools, release 12.1, V12.1.105
#   Build cuda_12.1.r12.1/compiler.32688072_0
# --- Dependency 2: PyTorch ---
# Selector page: https://pytorch.org/get-started/locally/
# No CUDA 12 build is listed there yet; upstream states the CUDA 11.8
# build is forward-compatible, so install the latest build against 11.8.
conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia
# --- Dependency 3: transformers ---
conda install -c huggingface transformers
# --- Dependency 4: accelerate ---
pip install accelerate
# --- Step 5: generate the Vicuna model ---
# Install FastChat, which provides the delta-merge tool.
pip3 install fschat
# Merge the base LLaMA weights with the Vicuna delta to produce the model.
python3 -m fastchat.model.apply_delta \
  --base ~/GPT/Modules/llama-7b-hf \
  --target output/vicuna-7b \
  --delta ~/GPT/Modules/vicuna-7b-delta-v0
# Expected progress output while it runs (comments, not commands):
#   Loading the base model from /home/ubuntu/GPT/Modules/llama-7b-hf
#   Loading checkpoint shards:  24%|####...  | 8/33 [00:19<01:04,  2.56s/it]
安装MiniGPT-4
git clone https://github.com/Vision-CAIR/MiniGPT-4.git
cd MiniGPT-4
conda env create -f environment.yml
conda activate minigpt4
# Download the MiniGPT-4 checkpoint aligned with Vicuna-7B:
# https://drive.google.com/file/d/1RY9jV0dyqLX-o38LrumkKRh6Jtaop58R/view?usp=sharing
# Once everything is in place, edit the two config files. We are already
# inside MiniGPT-4/ after the `cd` above, so the paths below are relative
# to the repo root (the original text repeated the "MiniGPT-4/" prefix,
# which points at a nonexistent nested directory). The repo was cloned as
# the current user, so no sudo is needed either.
# 1. Set llama_model: to the merged Vicuna-7B directory produced earlier.
nano minigpt4/configs/models/minigpt4.yaml
# 2. Set ckpt: to the path of the checkpoint downloaded above.
nano eval_configs/minigpt4_eval.yaml
# The exciting moment — launch the demo!
python3 demo.py --cfg-path eval_configs/minigpt4_eval.yaml --gpu-id 0
1 条评论
博主太厉害了!