Python 部署 ChatGLM2-6B

先克隆模型实现代码(GIT_LFS_SKIP_SMUDGE=1 跳过 LFS 大文件,不下载权重),权重稍后手动下载: GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/THUDM/chatglm2-6b

cd ChatGLM2-6B

mkdir modelsglm && cd modelsglm

vim downs.py

# coding=utf-8
"""Download the 7 sharded ChatGLM2-6B weight files from the Tsinghua cloud mirror.

Each shard is a multi-gigabyte .bin file, so the HTTP response is streamed
to disk in chunks instead of being buffered entirely in memory.
"""
import requests


# URL template: the shard index (1..7) is inserted between these two pieces.
url1 = 'https://cloud.tsinghua.edu.cn/d/674208019e314311ab5c/files/?p=%2Fchatglm2-6b%2Fpytorch_model-0000'
url2 = '-of-00007.bin&dl=1'
# Local filename template, matching the upstream shard naming scheme.
save_path1 = 'pytorch_model-0000'
save_path2 = '-of-00007.bin'
# Browser-like User-Agent — the mirror may reject requests without one.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36'}

# Fetch all 7 base model shards (pytorch_model-00001-of-00007.bin .. -00007-).
for i in range(7):
    url = url1 + str(i + 1) + url2
    print(url)
    save_path = save_path1 + str(i + 1) + save_path2
    # stream=True: download chunk by chunk instead of loading gigabytes into RAM.
    res = requests.get(url, headers=headers, stream=True, timeout=60)
    # Fail loudly on HTTP errors instead of silently saving an error page as a .bin.
    res.raise_for_status()
    with open(save_path, 'wb') as file1:
        for chunk in res.iter_content(chunk_size=1 << 20):  # 1 MiB chunks
            file1.write(chunk)
    print("第{}个模型下载已完成".format(i + 1))

python downs.py

等待模型下载完成

sudo vim web_demo.py

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).cuda()

替换为

tokenizer = AutoTokenizer.from_pretrained("/modelglm", trust_remote_code=True)
model = AutoModel.from_pretrained("/modelglm", trust_remote_code=True).cuda()

这个按需添加吧 如果报错的话就加这个

export LD_LIBRARY_PATH=<此处替换为python环境中CUDA库的路径>:$LD_LIBRARY_PATH
pip install torchvision==0.15.2+cu118 --index-url https://download.pytorch.org/whl/cu118

torch-2.0.1%2Bcu118-cp311-cp311-linux_x86_64.whl

torchvision-0.15.2+cu118-cp311-cp311-linux_x86_64.whl

torchaudio-2.0.1%2Bcu118-cp311-cp311-linux_x86_64.whl

pip install torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/cu118

nohup pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/cu118 > myoutput.file 2>&1 &

pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 torchaudio==2.0.1 -i https://pypi.tuna.tsinghua.edu.cn/simple

原始版本

demo.queue().launch(share=False, inbrowser=True)

替换这个
demo.queue().launch(share=False, inbrowser=True,server_name="0.0.0.0",server_port=8898)

demo.queue().launch(server_name="0.0.0.0", server_port=8898)

如果前端出现问题

先卸载: pip uninstall gradio

再安装指定版本: pip install gradio==3.39.0

后台运行 nohup python web_demo.py > /dev/null 2>&1 &

posted @ 2023-10-24 22:16  尘梦  阅读(86)  评论(0编辑  收藏  举报