Hands-on Experiments with LCEL

1. Basic chain

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.llms.chatglm3 import ChatGLM3
import os
os.environ["LANGCHAIN_TRACING_V2"] ="true"
os.environ["LANGCHAIN_API_KEY"]="ls__96d567894428421db2d42dec4edde0b4"
#llm
endpoint_url = "https://u4378-ad4b-55fa9b48.westb.seetacloud.com:8443/v1/chat/completions"
llm = ChatGLM3(
    endpoint_url=endpoint_url,
    max_tokens=8000,
    top_p=0.9,
    timeout=999
)
# compose prompt -> llm -> parser with the LCEL pipe operator
prompt = ChatPromptTemplate.from_template("tell me a short joke about {topic}")
output_parser = StrOutputParser()
chain = prompt | llm | output_parser
chain.invoke({"topic": "ice cream"})
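Because an LCEL chain is itself a Runnable, it also exposes streaming and batching with no extra code. A minimal sketch, reusing the chain defined above:

# stream the answer chunk by chunk instead of waiting for the full string
for chunk in chain.stream({"topic": "ice cream"}):
    print(chunk, end="", flush=True)

# run several inputs in one call
chain.batch([{"topic": "ice cream"}, {"topic": "bears"}])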

  


2. RAG search example

 

from langchain_community.vectorstores import DocArrayInMemorySearch
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_community.llms.chatglm3 import ChatGLM3
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
import os
# enable LangSmith tracing
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "ls__96d567894428421db2d42dec4edde0b4"
# LLM: ChatGLM3 served behind a remote chat-completions endpoint
endpoint_url = "https://u4378-ad4b-55fa9b48.westb.seetacloud.com:8443/v1/chat/completions"
llm = ChatGLM3(
    endpoint_url=endpoint_url,
    max_tokens=8000,
    top_p=0.9,
    timeout=999,
)
# embedding model: a local bge-large-zh-v1.5, run on CPU with normalized vectors
model_name = "/home/jsxyhelu/CODES/bge-large-zh-v1.5"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}
embedding = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
    query_instruction="为这个句子生成表示以用于检索相关文章:"
)
# index two toy documents in an in-memory vector store and expose it as a retriever
vectorstore = DocArrayInMemorySearch.from_texts(
    ["harrison worked at kensho", "bears like to eat honey"],
    embedding=embedding,
)
retriever = vectorstore.as_retriever()
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
output_parser = StrOutputParser()
# fan the input out in parallel: the retriever fills {context}, the raw question passes through
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | llm | output_parser
chain.invoke("where did harrison work?")

  

LCEL is more concise and more modern; it should be the approach of choice.
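For contrast, the pre-LCEL style needed a dedicated wrapper class even for the basic chain. A sketch, assuming the legacy LLMChain from the langchain package is still installed:

from langchain.chains import LLMChain

# legacy style: an extra wrapper class, and the output parser is not part of the chain
legacy_chain = LLMChain(llm=llm, prompt=prompt)
result = legacy_chain.invoke({"topic": "ice cream"})  # returns a dict such as {"topic": ..., "text": ...}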


In my view, mastering LCEL to this level is entirely sufficient.
