CustomLLM in LlamaIndex (loading a model locally)
Out of the box, LlamaIndex's interfaces mostly call OpenAI. Can a custom model be used instead? Of course it can. Digging through the codebase turns up a simple abstract base class for custom large language models: class CustomLLM(LLM).
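For reference, the default OpenAI-backed path looks roughly like this (a sketch, assuming the same llama_index version as the code below and an OPENAI_API_KEY in the environment); the rest of this post swaps that default LLM for a locally loaded model:

from llama_index import ServiceContext, SimpleDirectoryReader, SummaryIndex

# With no `llm` argument, from_defaults() falls back to OpenAI
# (embeddings default to OpenAI as well), so an OPENAI_API_KEY
# must be available in the environment.
service_context = ServiceContext.from_defaults()
documents = SimpleDirectoryReader("./data").load_data()
index = SummaryIndex.from_documents(documents, service_context=service_context)
print(index.as_query_engine().query("花未眠"))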
1. CustomLLM(LLM) source code
Subclasses only need to implement the __init__, _complete, _stream_complete, and metadata methods (in practice, as in the example in Section 2, the methods actually overridden are complete and stream_complete).
class CustomLLM(LLM):
    """Simple abstract base class for custom LLMs.

    Subclasses must implement the `__init__`, `_complete`,
    `_stream_complete`, and `metadata` methods.
    """

    @llm_chat_callback()
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        chat_fn = completion_to_chat_decorator(self.complete)
        return chat_fn(messages, **kwargs)

    @llm_chat_callback()
    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        stream_chat_fn = stream_completion_to_chat_decorator(self.stream_complete)
        return stream_chat_fn(messages, **kwargs)

    @llm_chat_callback()
    async def achat(
        self,
        messages: Sequence[ChatMessage],
        **kwargs: Any,
    ) -> ChatResponse:
        return self.chat(messages, **kwargs)

    @llm_chat_callback()
    async def astream_chat(
        self,
        messages: Sequence[ChatMessage],
        **kwargs: Any,
    ) -> ChatResponseAsyncGen:
        async def gen() -> ChatResponseAsyncGen:
            for message in self.stream_chat(messages, **kwargs):
                yield message

        # NOTE: convert generator to async generator
        return gen()

    @llm_completion_callback()
    async def acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        return self.complete(prompt, **kwargs)

    @llm_completion_callback()
    async def astream_complete(
        self, prompt: str, **kwargs: Any
    ) -> CompletionResponseAsyncGen:
        async def gen() -> CompletionResponseAsyncGen:
            for message in self.stream_complete(prompt, **kwargs):
                yield message

        # NOTE: convert generator to async generator
        return gen()

    @classmethod
    def class_name(cls) -> str:
        return "custom_llm"
2. A custom QwenCustomLLM
The goal is to load a model from the Qwen family off local disk. The implementation is as follows:
from typing import Any
from llama_index import ServiceContext, SimpleDirectoryReader, SummaryIndex
from llama_index.llms import (
    CustomLLM,
    CompletionResponse,
    CompletionResponseGen,
    LLMMetadata,
)
from llama_index.llms.base import llm_completion_callback
from transformers import AutoTokenizer, AutoModelForCausalLM


class QwenCustomLLM(CustomLLM):
    context_window: int = 8192  # context window size
    num_output: int = 128  # number of output tokens
    model_name: str = "Qwen-1_8B-Chat"  # model name
    tokenizer: object = None  # tokenizer
    model: object = None  # model

    def __init__(self, pretrained_model_name_or_path):
        super().__init__()
        # Load the model on GPU
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, device_map="cuda", trust_remote_code=True)
        self.model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, device_map="cuda", trust_remote_code=True).eval()
        # Load the model on CPU
        # self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, device_map="cpu", trust_remote_code=True)
        # self.model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, device_map="cpu", trust_remote_code=True)
        self.model = self.model.float()  # cast to float32 (mainly needed for CPU inference; increases memory use on GPU)

    @property
    def metadata(self) -> LLMMetadata:
        """Get LLM metadata."""
        return LLMMetadata(
            context_window=self.context_window,
            num_output=self.num_output,
            model_name=self.model_name,
        )

    @llm_completion_callback()  # callback decorator
    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        # Completion function
        print("completion function")
        inputs = self.tokenizer.encode(prompt, return_tensors='pt').cuda()  # GPU
        # inputs = self.tokenizer.encode(prompt, return_tensors='pt')  # CPU
        outputs = self.model.generate(inputs, max_length=self.num_output)  # NOTE: max_length also counts the prompt tokens
        response = self.tokenizer.decode(outputs[0])
        return CompletionResponse(text=response)

    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, **kwargs: Any
    ) -> CompletionResponseGen:
        # Streaming completion function
        print("streaming completion function")
        inputs = self.tokenizer.encode(prompt, return_tensors='pt').cuda()  # GPU
        # inputs = self.tokenizer.encode(prompt, return_tensors='pt')  # CPU
        outputs = self.model.generate(inputs, max_length=self.num_output)
        response = self.tokenizer.decode(outputs[0])
        # Stream the decoded text back character by character
        for token in response:
            yield CompletionResponse(text=token, delta=token)


if __name__ == "__main__":
    # Define your LLM
    pretrained_model_name_or_path = r'L:\20230713_HuggingFaceModel\20230925_Qwen\Qwen-1_8B'
    llm = QwenCustomLLM(pretrained_model_name_or_path)
    # Define your service context
    service_context = ServiceContext.from_defaults(
        llm=llm, embed_model="local:L:/20230713_HuggingFaceModel/BAAI_bge-large-zh"
    )
    # Load your data
    documents = SimpleDirectoryReader("./data").load_data()
    index = SummaryIndex.from_documents(documents, service_context=service_context)
    # Query and print the result
    query_engine = index.as_query_engine()
    response = query_engine.query("花未眠")
    print(response)
The basic idea is to load the LLM and the embedding model, then query against the data directory as the knowledge base. Since the code is commented in detail, it is not explained further here.
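As a quick usage check, the custom LLM can also be exercised on its own, without building an index; a short, purely illustrative sketch reusing the same local path as above:

# Hypothetical sanity check: call the custom LLM directly
llm = QwenCustomLLM(r'L:\20230713_HuggingFaceModel\20230925_Qwen\Qwen-1_8B')
print(llm.metadata)  # context_window / num_output / model_name
print(llm.complete("Briefly introduce yourself.").text)  # single completion
for chunk in llm.stream_complete("Briefly introduce yourself."):
    print(chunk.delta, end="")  # streamed back character by character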