LCEL的具体实验
一、基本chain
"""Minimal LCEL chain: prompt template -> ChatGLM3 -> string output parser."""
import os

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.llms.chatglm3 import ChatGLM3

# Enable LangSmith tracing.
# SECURITY: the original committed a real API key ("ls__96d5...") to source.
# That key is now public and must be rotated. Supply the key via the
# environment instead of hardcoding it; the placeholder below is only a
# fallback so the script fails loudly rather than tracing under a leaked key.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ.setdefault("LANGCHAIN_API_KEY", "ls__REPLACE_WITH_YOUR_KEY")

# ChatGLM3 served behind an OpenAI-compatible chat-completions endpoint.
endpoint_url = "https://u4378-ad4b-55fa9b48.westb.seetacloud.com:8443/v1/chat/completions"
llm = ChatGLM3(
    endpoint_url=endpoint_url,
    max_tokens=8000,
    top_p=0.9,
    timeout=999,  # generous timeout: remote endpoint can be slow to respond
)

prompt = ChatPromptTemplate.from_template("tell me a short joke about {topic}")
output_parser = StrOutputParser()

# LCEL pipeline: format the prompt | call the model | parse the reply to str.
chain = prompt | llm | output_parser

if __name__ == "__main__":
    # The original invoked the chain and discarded the result; print it so
    # running the script actually shows the model's answer.
    print(chain.invoke({"topic": "ice cream"}))
二、rag search example
"""LCEL RAG example: retrieve context from an in-memory vector store, fill a
prompt template with it, call ChatGLM3, and parse the answer to a string.

Pipeline: RunnableParallel(context=retriever, question=passthrough)
          | prompt | llm | StrOutputParser
"""
import os

from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.llms.chatglm3 import ChatGLM3
from langchain_community.vectorstores import DocArrayInMemorySearch
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough

# Enable LangSmith tracing.
# SECURITY: the original committed a real API key ("ls__96d5...") to source.
# That key is now public and must be rotated; read the key from the
# environment instead of hardcoding it.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ.setdefault("LANGCHAIN_API_KEY", "ls__REPLACE_WITH_YOUR_KEY")

# --- LLM: ChatGLM3 behind an OpenAI-compatible chat-completions endpoint ---
endpoint_url = "https://u4378-ad4b-55fa9b48.westb.seetacloud.com:8443/v1/chat/completions"
llm = ChatGLM3(
    endpoint_url=endpoint_url,
    max_tokens=8000,
    top_p=0.9,
    timeout=999,  # generous timeout: remote endpoint can be slow to respond
)

# --- Embeddings: local BGE model (Chinese, large, v1.5) running on CPU ---
model_name = "/home/jsxyhelu/CODES/bge-large-zh-v1.5"
model_kwargs = {"device": "cpu"}
# Normalized embeddings let the store use cosine similarity via dot product.
encode_kwargs = {"normalize_embeddings": True}
embedding = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
    query_instruction="为这个句子生成表示以用于检索相关文章:",
)

# --- Tiny in-memory knowledge base ---
vectorstore = DocArrayInMemorySearch.from_texts(
    [
        "harrison worked at kensho",
        "bears like to eat honey",
    ],
    embedding=embedding,
)
retriever = vectorstore.as_retriever()

template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
output_parser = StrOutputParser()

# Fan-out step: the retriever fills {context} from the question, while the
# raw question passes through unchanged into {question}.
setup_and_retrieval = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)
chain = setup_and_retrieval | prompt | llm | output_parser

if __name__ == "__main__":
    # The original invoked the chain and discarded the result; print it so
    # running the script actually shows the model's answer.
    print(chain.invoke("where did harrison work?"))
LCEL 写法更精简、更现代,应该作为首选的构建方法。
我认为掌握到这个水平完全是足够了。
【推荐】国内首个AI IDE,深度理解中文开发场景,立即下载体验Trae
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· 阿里最新开源QwQ-32B,效果媲美deepseek-r1满血版,部署成本又又又降低了!
· AI编程工具终极对决:字节Trae VS Cursor,谁才是开发者新宠?
· 开源Multi-agent AI智能体框架aevatar.ai,欢迎大家贡献代码
· Manus重磅发布:全球首款通用AI代理技术深度解析与实战指南
· 被坑几百块钱后,我竟然真的恢复了删除的微信聊天记录!