LangChain Streaming Output


import os
from operator import itemgetter

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel
from langchain_openai import ChatOpenAI

# Placeholder key for the local OpenAI-compatible server (it only needs to be non-empty)
os.environ["OPENAI_API_KEY"] = "not empty"

llm = ChatOpenAI(model="Yi-1.5-34B-Chat", temperature=0, openai_api_base="http://127.0.0.1:6222/v1")
llm.invoke("中国的首都是哪里")
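As a quick sanity check that the local endpoint supports streaming at all, the chat model can be streamed directly; a minimal sketch reusing the same llm:

# Each chunk is an AIMessageChunk carrying a partial .content
for chunk in llm.stream("中国的首都是哪里"):
    print(chunk.content, end="", flush=True)
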
# Each branch parses the model output to a string and wraps it under its own key
joke_chain = (
    ChatPromptTemplate.from_template("tell me a joke about {topic}")
    | llm
    | {"joke": StrOutputParser()}
)
poem_chain = (
    ChatPromptTemplate.from_template("write a 2-line poem about {topic}")
    | llm
    | {"poem": StrOutputParser()}
)

# runnable = RunnableParallel(joke=joke_chain, poem=poem_chain)
# .assign adds an "answer" key whose value is copied from the "poem" branch once it finishes
runnable = RunnableParallel({"joke": joke_chain, "poem": poem_chain}).assign(answer=itemgetter("poem"))

# Display the stream
# Each chunk is a single-key dict naming the branch it came from,
# e.g. {'joke': {'joke': ' bear'}}; here the raw chunks are simply printed.
for chunk in runnable.stream({"topic": "bear"}):
    print(chunk)  # noqa: T201
{'poem': {'poem': ''}}
{'poem': {'poem': 'In'}}
{'joke': {'joke': ''}}
{'joke': {'joke': 'Why'}}
{'joke': {'joke': ' did'}}
{'poem': {'poem': ' the'}}
{'poem': {'poem': ' forest'}}
{'joke': {'joke': ' the'}}
{'poem': {'poem': ' deep'}}
{'joke': {'joke': ' bear'}}
{'poem': {'poem': ' and'}}
{'joke': {'joke': ' sit'}}
{'poem': {'poem': ' grand'}}
{'joke': {'joke': ' next'}}
{'joke': {'joke': ' to'}}
{'poem': {'poem': ','}}
{'poem': {'poem': '\n'}}
{'joke': {'joke': ' the'}}
{'joke': {'joke': ' camp'}}
{'poem': {'poem': 'A'}}
{'poem': {'poem': ' bear'}}
{'joke': {'joke': 'fire'}}
{'poem': {'poem': ','}}
{'joke': {'joke': '?'}}
{'poem': {'poem': ' both'}}
{'joke': {'joke': '\n'}}
{'poem': {'poem': ' fierce'}}
{'joke': {'joke': '\n'}}
{'poem': {'poem': ' and'}}
{'joke': {'joke': 'Because'}}
{'poem': {'poem': ' gentle'}}
{'joke': {'joke': ' he'}}
{'poem': {'poem': ','}}
{'joke': {'joke': ' couldn'}}
{'poem': {'poem': ' stands'}}
{'joke': {'joke': "'"}}
{'poem': {'poem': ' tall'}}
{'joke': {'joke': 't'}}
{'poem': {'poem': ','}}
{'joke': {'joke': ' bear'}}
{'poem': {'poem': ' hand'}}
{'joke': {'joke': ' to'}}
{'poem': {'poem': ' in'}}
{'joke': {'joke': ' be'}}
{'poem': {'poem': ' hand'}}
{'joke': {'joke': ' cold'}}
{'joke': {'joke': '!'}}
{'poem': {'poem': '.'}}
{'poem': {'poem': ''}}
{'joke': {'joke': ' <'}}
{'joke': {'joke': '|'}}
{'joke': {'joke': 'im'}}
{'joke': {'joke': '_'}}
{'joke': {'joke': 'end'}}
{'joke': {'joke': '|>'}}
{'joke': {'joke': '\n'}}
{'joke': {'joke': '\n'}}
{'joke': {'joke': '('}}
{'joke': {'joke': 'Note'}}
{'joke': {'joke': ':'}}
{'joke': {'joke': ' Always'}}
{'joke': {'joke': ' ensure'}}
{'joke': {'joke': ' the'}}
{'joke': {'joke': ' joke'}}
{'joke': {'joke': ' is'}}
{'joke': {'joke': ' appropriate'}}
{'joke': {'joke': ' and'}}
{'joke': {'joke': ' in'}}
{'joke': {'joke': ' good'}}
{'joke': {'joke': ' taste'}}
{'joke': {'joke': '.'}}
{'joke': {'joke': ')'}}
{'joke': {'joke': ''}}
{'answer': {'poem': 'In the forest deep and grand,\nA bear, both fierce and gentle, stands tall, hand in hand.'}}
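Because the two branches run concurrently, their chunks interleave. A minimal sketch (reusing the runnable above, with the branch keys taken from the output shown) that stitches the pieces back into complete strings:

# Rebuild the full texts from the streamed chunks
output = {"joke": "", "poem": "", "answer": ""}
for chunk in runnable.stream({"topic": "bear"}):
    for key, value in chunk.items():
        # value is a dict such as {'joke': ' bear'}; append its text piece
        output[key] += "".join(value.values())
print(output)
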
from operator import itemgetter
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import Runnable
from langchain_core.language_models import FakeStreamingListLLM
from langchain_core.prompts import SystemMessagePromptTemplate

prompt = (
    SystemMessagePromptTemplate.from_template("你是一个智能助手")
    + "{question}"
)
# llm = FakeStreamingListLLM(responses=["foo-lish"])
llm = ChatOpenAI(model="Yi-1.5-34B-Chat", temperature=0, openai_api_base="http://127.0.0.1:6222/v1")

chain: Runnable = prompt | llm | {"str": StrOutputParser()}

# .assign feeds the chain's "str" output back through the llm and stores the reply under "hello"
chain_with_assign = chain.assign(hello=itemgetter("str") | llm)

print(chain_with_assign.input_schema.schema())
print(chain_with_assign.output_schema.schema())

{'title': 'PromptInput', 'type': 'object', 'properties': {'question': {'title': 'Question', 'type': 'string'}}}
{'title': 'RunnableSequenceOutput', 'type': 'object', 'properties': {'str': {'title': 'Str', 'type': 'string'}, 'hello': {'title': 'Hello', 'anyOf': [{'$ref': '#/definitions/AIMessage'}, {'$ref': '#/definitions/HumanMessage'}, {'$ref': '#/definitions/ChatMessage'}, {'$ref': '#/definitions/SystemMessage'}, {'$ref': '#/definitions/FunctionMessage'}, {'$ref': '#/definitions/ToolMessage'}]}}, 'definitions': {'ToolCall': {'title': 'ToolCall', 'type': 'object', 'properties': {'name': {'title': 'Name', 'type': 'string'}, 'args': {'title': 'Args', 'type': 'object'}, 'id': {'title': 'Id', 'type': 'string'}}, 'required': ['name', 'args', 'id']}, 'InvalidToolCall': {'title': 'InvalidToolCall', 'type': 'object', 'properties': {'name': {'title': 'Name', 'type': 'string'}, 'args': {'title': 'Args', 'type': 'string'}, 'id': {'title': 'Id', 'type': 'string'}, 'error': {'title': 'Error', 'type': 'string'}}, 'required': ['name', 'args', 'id', 'error']}, 'AIMessage': {'title': 'AIMessage', 'description': 'Message from an AI.', 'type': 'object', 'properties': {'content': {'title': 'Content', 'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf': [{'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs': {'title': 'Additional Kwargs', 'type': 'object'}, 'response_metadata': {'title': 'Response Metadata', 'type': 'object'}, 'type': {'title': 'Type', 'default': 'ai', 'enum': ['ai'], 'type': 'string'}, 'name': {'title': 'Name', 'type': 'string'}, 'id': {'title': 'Id', 'type': 'string'}, 'example': {'title': 'Example', 'default': False, 'type': 'boolean'}, 'tool_calls': {'title': 'Tool Calls', 'default': [], 'type': 'array', 'items': {'$ref': '#/definitions/ToolCall'}}, 'invalid_tool_calls': {'title': 'Invalid Tool Calls', 'default': [], 'type': 'array', 'items': {'$ref': '#/definitions/InvalidToolCall'}}}, 'required': ['content']}, 'HumanMessage': {'title': 'HumanMessage', 'description': 'Message from a human.', 'type': 'object', 'properties': {'content': {'title': 'Content', 'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf': [{'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs': {'title': 'Additional Kwargs', 'type': 'object'}, 'response_metadata': {'title': 'Response Metadata', 'type': 'object'}, 'type': {'title': 'Type', 'default': 'human', 'enum': ['human'], 'type': 'string'}, 'name': {'title': 'Name', 'type': 'string'}, 'id': {'title': 'Id', 'type': 'string'}, 'example': {'title': 'Example', 'default': False, 'type': 'boolean'}}, 'required': ['content']}, 'ChatMessage': {'title': 'ChatMessage', 'description': 'Message that can be assigned an arbitrary speaker (i.e. 
role).', 'type': 'object', 'properties': {'content': {'title': 'Content', 'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf': [{'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs': {'title': 'Additional Kwargs', 'type': 'object'}, 'response_metadata': {'title': 'Response Metadata', 'type': 'object'}, 'type': {'title': 'Type', 'default': 'chat', 'enum': ['chat'], 'type': 'string'}, 'name': {'title': 'Name', 'type': 'string'}, 'id': {'title': 'Id', 'type': 'string'}, 'role': {'title': 'Role', 'type': 'string'}}, 'required': ['content', 'role']}, 'SystemMessage': {'title': 'SystemMessage', 'description': 'Message for priming AI behavior, usually passed in as the first of a sequence\nof input messages.', 'type': 'object', 'properties': {'content': {'title': 'Content', 'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf': [{'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs': {'title': 'Additional Kwargs', 'type': 'object'}, 'response_metadata': {'title': 'Response Metadata', 'type': 'object'}, 'type': {'title': 'Type', 'default': 'system', 'enum': ['system'], 'type': 'string'}, 'name': {'title': 'Name', 'type': 'string'}, 'id': {'title': 'Id', 'type': 'string'}}, 'required': ['content']}, 'FunctionMessage': {'title': 'FunctionMessage', 'description': 'Message for passing the result of executing a function back to a model.', 'type': 'object', 'properties': {'content': {'title': 'Content', 'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf': [{'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs': {'title': 'Additional Kwargs', 'type': 'object'}, 'response_metadata': {'title': 'Response Metadata', 'type': 'object'}, 'type': {'title': 'Type', 'default': 'function', 'enum': ['function'], 'type': 'string'}, 'name': {'title': 'Name', 'type': 'string'}, 'id': {'title': 'Id', 'type': 'string'}}, 'required': ['content', 'name']}, 'ToolMessage': {'title': 'ToolMessage', 'description': 'Message for passing the result of executing a tool back to a model.', 'type': 'object', 'properties': {'content': {'title': 'Content', 'anyOf': [{'type': 'string'}, {'type': 'array', 'items': {'anyOf': [{'type': 'string'}, {'type': 'object'}]}}]}, 'additional_kwargs': {'title': 'Additional Kwargs', 'type': 'object'}, 'response_metadata': {'title': 'Response Metadata', 'type': 'object'}, 'type': {'title': 'Type', 'default': 'tool', 'enum': ['tool'], 'type': 'string'}, 'name': {'title': 'Name', 'type': 'string'}, 'id': {'title': 'Id', 'type': 'string'}, 'tool_call_id': {'title': 'Tool Call Id', 'type': 'string'}}, 'required': ['content', 'tool_call_id']}}}
chain_with_assign.invoke("中国首都")
{'str': '中国的首都是北京。',
 'hello': AIMessage(content='是的,中国的首都是北京。北京是中国的政治、文化、国际交流和科技创新中心之一,拥有众多的历史遗迹和现代建筑,同时也是中国的主要交通枢纽。北京以其悠久的历史、丰富的文化遗产和独特的生活方式而闻名于世。', response_metadata={'token_usage': {'completion_tokens': 52, 'prompt_tokens': 25, 'total_tokens': 77}, 'model_name': 'Yi-1.5-34B-Chat', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}, id='run-5a90a972-8b9f-41f7-831e-b87444681419-0')}
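The assigned chain can be streamed as well; a minimal sketch (same chain as above), where "str" chunks from the first model call are followed by "hello" chunks from the second call:

# Stream the assigned chain; expect {'str': ...} pieces, then {'hello': AIMessageChunk(...)} pieces
for chunk in chain_with_assign.stream("中国首都"):
    print(chunk)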
