Using the Qwen AI agent qwen_agent

First, the code for a simple example:

# Reference: https://platform.openai.com/docs/guides/function-calling
import json
import os

# Requires a DashScope API key (DASHSCOPE_API_KEY)

from qwen_agent.llm import get_chat_model


# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
def get_current_weather(location, unit='fahrenheit'):
    """Get the current weather in a given location"""
    if 'tokyo' in location.lower():
        return json.dumps({'location': 'Tokyo', 'temperature': '10', 'unit': 'celsius'})
    elif 'san francisco' in location.lower():
        return json.dumps({'location': 'San Francisco', 'temperature': '72', 'unit': 'fahrenheit'})
    elif 'paris' in location.lower():
        return json.dumps({'location': 'Paris', 'temperature': '22', 'unit': 'celsius'})
    else:
        return json.dumps({'location': location, 'temperature': 'unknown'})


def test():
    llm = get_chat_model({
        # Use the model service provided by DashScope:
        # 'model': 'qwen-max',
        'model': 'qwen-plus',
        'model_server': 'dashscope',
        'api_key': 'sk-c78<replace-with-your-key>c8',  # or use os.getenv('DASHSCOPE_API_KEY')

        # Use the model service provided by Together.AI:
        # 'model': 'Qwen/Qwen1.5-14B-Chat',
        # 'model_server': 'https://api.together.xyz',  # api_base
        # 'api_key': os.getenv('TOGETHER_API_KEY'),

        # Use your own model service compatible with OpenAI API:
        # 'model': 'Qwen/Qwen1.5-72B-Chat',
        # 'model_server': 'http://localhost:8000/v1',  # api_base
        # 'api_key': 'EMPTY',
    })

    # Step 1: send the conversation and available functions to the model
    messages = [{'role': 'user', 'content': "What's the weather like in San Francisco?"}]
    functions = [{
        'name': 'get_current_weather',
        'description': 'Get the current weather in a given location',
        'parameters': {
            'type': 'object',
            'properties': {
                'location': {
                    'type': 'string',
                    'description': 'The city and state, e.g. San Francisco, CA',
                },
                'unit': {
                    'type': 'string',
                    'enum': ['celsius', 'fahrenheit']
                },
            },
            'required': ['location'],
        },
    }]

    print('# Assistant Response 1:')
    responses = []
    # With stream=True, each iteration yields the full list of responses accumulated
    # so far (see the run output below), so `responses` ends up holding the complete reply
    for responses in llm.chat(messages=messages, functions=functions, stream=True):
        print(responses)

    messages.extend(responses)  # extend conversation with assistant's reply

    # Step 2: check if the model wanted to call a function
    last_response = messages[-1]
    print("*"*88)
    print(last_response)
    print("*"*88)
    if last_response.get('function_call', None):

        # Step 3: call the function
        # Note: the JSON response may not always be valid; be sure to handle errors
        available_functions = {
            'get_current_weather': get_current_weather,
        }  # only one function in this example, but you can have multiple
        function_name = last_response['function_call']['name']
        function_to_call = available_functions[function_name]
        function_args = json.loads(last_response['function_call']['arguments'])
        function_response = function_to_call(
            location=function_args.get('location'),
            unit=function_args.get('unit'),
        )
        print('# Function Response:')
        print(function_response)

        # Step 4: send the info for each function call and function response to the model
        messages.append({
            'role': 'function',
            'name': function_name,
            'content': function_response,
        })  # extend conversation with function response

        print('# Assistant Response 2:')
        for responses in llm.chat(
                messages=messages,
                functions=functions,
                stream=True,
        ):  # get a new response from the model where it can see the function response
            print(responses)


if __name__ == '__main__':
    test()


Run output:

# Assistant Response 1:
[{'role': 'assistant', 'content': '', 'function_call': {'name': 'get_current_weather', 'arguments': ''}}]
[{'role': 'assistant', 'content': '', 'function_call': {'name': 'get_current_weather', 'arguments': ''}}]
[{'role': 'assistant', 'content': '', 'function_call': {'name': 'get_current_weather', 'arguments': '{\n  "location'}}]
[{'role': 'assistant', 'content': '', 'function_call': {'name': 'get_current_weather', 'arguments': '{\n  "location": "San Francisco'}}]
[{'role': 'assistant', 'content': '', 'function_call': {'name': 'get_current_weather', 'arguments': '{\n  "location": "San Francisco, CA",'}}]
[{'role': 'assistant', 'content': '', 'function_call': {'name': 'get_current_weather', 'arguments': '{\n  "location": "San Francisco, CA",\n  "unit": "'}}]
[{'role': 'assistant', 'content': '', 'function_call': {'name': 'get_current_weather', 'arguments': '{\n  "location": "San Francisco, CA",\n  "unit": "celsius"\n}'}}]
[{'role': 'assistant', 'content': '', 'function_call': {'name': 'get_current_weather', 'arguments': '{\n  "location": "San Francisco, CA",\n  "unit": "celsius"\n}'}}]
****************************************************************************************
{'role': 'assistant', 'content': '', 'function_call': {'name': 'get_current_weather', 'arguments': '{\n  "location": "San Francisco, CA",\n  "unit": "celsius"\n}'}}
****************************************************************************************
# Function Response:
{"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"}
# Assistant Response 2:
[{'role': 'assistant', 'content': 'The'}]
[{'role': 'assistant', 'content': 'The current'}]
[{'role': 'assistant', 'content': 'The current weather'}]
[{'role': 'assistant', 'content': 'The current weather in San Francisco,'}]
[{'role': 'assistant', 'content': 'The current weather in San Francisco, California is 7'}]
[{'role': 'assistant', 'content': 'The current weather in San Francisco, California is 72 degrees Fahrenheit ('}]
[{'role': 'assistant', 'content': 'The current weather in San Francisco, California is 72 degrees Fahrenheit (approximately 22'}]
[{'role': 'assistant', 'content': 'The current weather in San Francisco, California is 72 degrees Fahrenheit (approximately 22.2 degrees Celsius'}]
[{'role': 'assistant', 'content': 'The current weather in San Francisco, California is 72 degrees Fahrenheit (approximately 22.2 degrees Celsius).'}]

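Note how the streamed output above grows: each iteration yields the cumulative response so far, not a delta, with the arguments string filling in until the JSON is complete. And as the comment in Step 3 warns, the model-generated arguments may not always be valid JSON. A minimal defensive-parsing sketch (the parse_function_args helper is my own addition, not part of qwen_agent):

import json

def parse_function_args(function_call):
    """Safely parse the model-generated arguments string; return None on invalid JSON."""
    try:
        return json.loads(function_call.get('arguments') or '{}')
    except json.JSONDecodeError:
        # The model occasionally emits malformed JSON; the caller can retry
        # the request or skip the tool call instead of crashing here
        return None

With this in place, Step 3 would read function_args = parse_function_args(last_response['function_call']), followed by a None check before calling the tool.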

Now for a more complex tool-calling example:

import json
import os
from qwen_agent.llm import get_chat_model


# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
def get_current_weather(location, unit='fahrenheit'):
    """Get the current weather in a given location"""
    if 'tokyo' in location.lower():
        return json.dumps({'location': 'Tokyo', 'temperature': '10', 'unit': 'celsius'})
    elif 'san francisco' in location.lower():
        return json.dumps({'location': 'San Francisco', 'temperature': '72', 'unit': 'fahrenheit'})
    elif 'paris' in location.lower():
        return json.dumps({'location': 'Paris', 'temperature': '22', 'unit': 'celsius'})
    else:
        return json.dumps({'location': location, 'temperature': 'unknown'})

def check_inventory(flower_type: str = "玫瑰花") -> str:
    """
    Check the inventory count for a given flower type (default: roses, "玫瑰花").
    Args:
    - flower_type: the type of flower
    Returns:
    - the inventory count, as a string (it is fed into a message's 'content' field)
    """
    # In a real application this would be a database query or some other inventory check
    return '130'  # assume every flower type has 130 units in stock

def get_extra_asset_price() -> str:
    """
    Get the price of the extra fixed asset.
    Args: none
    Returns:
    - the extra fixed asset price, as a string
    """
    return '500'


def process_responses(llm, messages, functions):
    print('# Assistant Response with:', messages)
    responses = []
    # stream=True yields the cumulative response list on each iteration
    for responses in llm.chat(messages=messages, functions=functions, stream=True):
        print(responses)
    print("llm.chat over")
    # Note: appending the replies here keeps the conversation state up to date,
    # which lets handle_function_calls below invoke tools iteratively
    messages.extend(responses)  # Extend conversation with assistant's replies


def handle_function_calls(llm, messages, functions):
    process_responses(llm, messages, functions)
    last_response = messages[-1]
    print("last_res:", last_response)
    while last_response.get('function_call', None):
        available_functions = {
            'get_current_weather': get_current_weather,
            'check_inventory': check_inventory,
            'get_extra_asset_price': get_extra_asset_price
        }
        function_name = last_response['function_call']['name']
        function_to_call = available_functions.get(function_name)
        if function_to_call:
            function_args = json.loads(last_response['function_call']['arguments'])
            function_response = function_to_call(**function_args)
        else:
            function_response = "Function not found or not available in available_functions"
        print('# Function Response:', function_response)

        messages.append({
            'role': 'function',
            'name': function_name,
            'content': function_response,
        })  # Extend conversation with function response

        print("check messages:", messages)
        process_responses(llm, messages, functions)  # Get a new response from the model
        print("*" * 88)
        last_response = messages[-1]  # Update last response for next iteration


def test():
    llm = get_chat_model({
        # Use the model service provided by DashScope:
        # 'model': 'qwen-max',
        'model': 'qwen-plus',
        'model_server': 'dashscope',
        'api_key': 'sk-c78896504df24xxxx7bf67529c8',  # replace with your own key, or use os.getenv('DASHSCOPE_API_KEY')

        # Use the model service provided by Together.AI:
        # 'model': 'Qwen/Qwen1.5-14B-Chat',
        # 'model_server': 'https://api.together.xyz',  # api_base
        # 'api_key': os.getenv('TOGETHER_API_KEY'),

        # Use your own model service compatible with OpenAI API:
        # 'model': 'Qwen/Qwen1.5-72B-Chat',
        # 'model_server': 'http://localhost:8000/v1',  # api_base
        # 'api_key': 'EMPTY',
    })

    # Step 1: send the conversation and available functions to the model
    # messages = [{'role': 'user', 'content': "What's the weather like in San Francisco?"}]
    # The Chinese prompt below asks: use the tools (functions) to look up the rose inventory;
    # given a unit price of 10 yuan per rose, compute the total rose price plus the extra fixed asset price.
    messages = [{'role': 'user', 'content': "通过使用工具(函数),查询玫瑰花的库存数量。已知玫瑰花的单价是10元,帮我计算玫瑰花总价加上额外固定资产的总价是多少?"}]
    functions = [{
        'name': 'get_current_weather',
        'description': 'Get the current weather in a given location',
        'parameters': {
            'type': 'object',
            'properties': {
                'location': {
                    'type': 'string',
                    'description': 'The city and state, e.g. San Francisco, CA',
                },
                'unit': {
                    'type': 'string',
                    'enum': ['celsius', 'fahrenheit']
                },
            },
            'required': ['location'],
        },
    }, {
        'name': 'check_inventory',
        'description': 'Check the inventory for a specific type of flower, default is roses',
        'parameters': {
            'type': 'object',
            'properties': {
                'flower_type': {
                    'type': 'string',
                    'description': 'Type of flower to check inventory for'
                }
            },
            'required': []
        }
    }, {
        'name': 'get_extra_asset_price',
        'description': 'Get the price of an extra fixed asset',
        'parameters': {
            'type': 'object',
            'properties': {},
            'required': []
        }
    }]
    handle_function_calls(llm, messages, functions)

if __name__ == '__main__':
    test()

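A caveat about handle_function_calls above: the while loop runs for as long as the model keeps emitting function_call messages, so in the worst case it could loop forever. Below is a minimal sketch of a capped variant; the function name, the available_functions parameter, and max_rounds are my additions, and it reuses process_responses from above:

def handle_function_calls_capped(llm, messages, functions, available_functions, max_rounds=5):
    """Same tool-calling loop as handle_function_calls, but give up after max_rounds calls."""
    process_responses(llm, messages, functions)
    for _ in range(max_rounds):
        last_response = messages[-1]
        if not last_response.get('function_call'):
            return  # the model produced a final text answer; we are done
        function_name = last_response['function_call']['name']
        function_to_call = available_functions.get(function_name)
        if function_to_call:
            function_args = json.loads(last_response['function_call']['arguments'] or '{}')
            function_response = function_to_call(**function_args)
        else:
            function_response = 'Function not found in available_functions'
        messages.append({'role': 'function', 'name': function_name, 'content': function_response})
        process_responses(llm, messages, functions)
    print(f'Gave up after {max_rounds} tool-call rounds without a final answer.')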

It turns out the Qwen agent works quite well: it produces the correct result almost every time!

D:\Python\Python312\python.exe D:\source\pythonProject\qianwen_x.py 
# Assistant Response with: [{'role': 'user', 'content': '通过使用工具(函数),查询玫瑰花的库存数量。已知玫瑰花的单价是10元,帮我计算玫瑰花总价加上额外固定资产的总价是多少?'}]
[{'role': 'assistant', 'content': '', 'function_call': {'name': 'check_inventory', 'arguments': ''}}]
[{'role': 'assistant', 'content': '', 'function_call': {'name': 'check_inventory', 'arguments': '{"'}}]
[{'role': 'assistant', 'content': '', 'function_call': {'name': 'check_inventory', 'arguments': '{"flower_type": "'}}]
[{'role': 'assistant', 'content': '', 'function_call': {'name': 'check_inventory', 'arguments': '{"flower_type": "roses"}'}}]
[{'role': 'assistant', 'content': '', 'function_call': {'name': 'check_inventory', 'arguments': '{"flower_type": "roses"}'}}]
llm.chat over
last_res: {'role': 'assistant', 'content': '', 'function_call': {'name': 'check_inventory', 'arguments': '{"flower_type": "roses"}'}}
# Function Response: 130
check messages: [{'role': 'user', 'content': '通过使用工具(函数),查询玫瑰花的库存数量。已知玫瑰花的单价是10元,帮我计算玫瑰花总价加上额外固定资产的总价是多少?'}, {'role': 'assistant', 'content': '', 'function_call': {'name': 'check_inventory', 'arguments': '{"flower_type": "roses"}'}}, {'role': 'function', 'name': 'check_inventory', 'content': '130'}]
# Assistant Response with: [{'role': 'user', 'content': '通过使用工具(函数),查询玫瑰花的库存数量。已知玫瑰花的单价是10元,帮我计算玫瑰花总价加上额外固定资产的总价是多少?'}, {'role': 'assistant', 'content': '', 'function_call': {'name': 'check_inventory', 'arguments': '{"flower_type": "roses"}'}}, {'role': 'function', 'name': 'check_inventory', 'content': '130'}]
[{'role': 'assistant', 'content': '当'}]
[{'role': 'assistant', 'content': '当前'}]
[{'role': 'assistant', 'content': '当前我们'}]
[{'role': 'assistant', 'content': '当前我们有130'}]
[{'role': 'assistant', 'content': '当前我们有130朵玫瑰花在'}]
[{'role': 'assistant', 'content': '当前我们有130朵玫瑰花在库存。'}]
[{'role': 'assistant', 'content': '当前我们有130朵玫瑰花在库存。'}, {'role': 'assistant', 'content': '', 'function_call': {'name': 'get_extra', 'arguments': ''}}]
[{'role': 'assistant', 'content': '当前我们有130朵玫瑰花在库存。'}, {'role': 'assistant', 'content': '', 'function_call': {'name': 'get_extra_asset_price', 'arguments': ''}}]
[{'role': 'assistant', 'content': '当前我们有130朵玫瑰花在库存。'}, {'role': 'assistant', 'content': '', 'function_call': {'name': 'get_extra_asset_price', 'arguments': '{}'}}]
[{'role': 'assistant', 'content': '当前我们有130朵玫瑰花在库存。'}, {'role': 'assistant', 'content': '', 'function_call': {'name': 'get_extra_asset_price', 'arguments': '{}'}}]
llm.chat over
****************************************************************************************
# Function Response: 500
check messages: [{'role': 'user', 'content': '通过使用工具(函数),查询玫瑰花的库存数量。已知玫瑰花的单价是10元,帮我计算玫瑰花总价加上额外固定资产的总价是多少?'}, {'role': 'assistant', 'content': '', 'function_call': {'name': 'check_inventory', 'arguments': '{"flower_type": "roses"}'}}, {'role': 'function', 'name': 'check_inventory', 'content': '130'}, {'role': 'assistant', 'content': '当前我们有130朵玫瑰花在库存。'}, {'role': 'assistant', 'content': '', 'function_call': {'name': 'get_extra_asset_price', 'arguments': '{}'}}, {'role': 'function', 'name': 'get_extra_asset_price', 'content': '500'}]
# Assistant Response with: [{'role': 'user', 'content': '通过使用工具(函数),查询玫瑰花的库存数量。已知玫瑰花的单价是10元,帮我计算玫瑰花总价加上额外固定资产的总价是多少?'}, {'role': 'assistant', 'content': '', 'function_call': {'name': 'check_inventory', 'arguments': '{"flower_type": "roses"}'}}, {'role': 'function', 'name': 'check_inventory', 'content': '130'}, {'role': 'assistant', 'content': '当前我们有130朵玫瑰花在库存。'}, {'role': 'assistant', 'content': '', 'function_call': {'name': 'get_extra_asset_price', 'arguments': '{}'}}, {'role': 'function', 'name': 'get_extra_asset_price', 'content': '500'}]
[{'role': 'assistant', 'content': '额'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是130朵'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是130朵 * 10'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是130朵 * 10元/朵 ='}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是130朵 * 10元/朵 = 130'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是130朵 * 10元/朵 = 1300元。\n\n所以'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是130朵 * 10元/朵 = 1300元。\n\n所以,玫瑰花总价'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是130朵 * 10元/朵 = 1300元。\n\n所以,玫瑰花总价加上额外固定资产的'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是130朵 * 10元/朵 = 1300元。\n\n所以,玫瑰花总价加上额外固定资产的总价是13'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是130朵 * 10元/朵 = 1300元。\n\n所以,玫瑰花总价加上额外固定资产的总价是1300元 +'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是130朵 * 10元/朵 = 1300元。\n\n所以,玫瑰花总价加上额外固定资产的总价是1300元 + 500'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是130朵 * 10元/朵 = 1300元。\n\n所以,玫瑰花总价加上额外固定资产的总价是1300元 + 500元 = 1'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是130朵 * 10元/朵 = 1300元。\n\n所以,玫瑰花总价加上额外固定资产的总价是1300元 + 500元 = 1800元'}]
[{'role': 'assistant', 'content': '额外固定资产的价格是500元。\n\n现在,玫瑰花的总价是130朵 * 10元/朵 = 1300元。\n\n所以,玫瑰花总价加上额外固定资产的总价是1300元 + 500元 = 1800元。'}]
llm.chat over
****************************************************************************************

Process finished with exit code 0
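
One detail worth noticing in the log: in the second round the model streamed two assistant messages in a single response, a text message ('当前我们有130朵玫瑰花在库存。') plus a separate function_call message for get_extra_asset_price. Because process_responses appends the whole list via messages.extend(responses), messages[-1] still points at the function_call message, so the loop correctly dispatches the next tool.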


How different models perform:

'model': 'qwen-max',
# 'model': 'qwen-turbo',
# 'model': 'qwen-long',  # fails to execute properly
# 'model': 'qwen-7b-chat',  # does not produce correct results
# 'model': 'qwen-1.8b-chat',  # does not produce correct results
# 'model': 'qwen-max-0428',
# 'model': 'qwen-plus',

Overall, this is considerably easier to use than LangChain's ReAct framework!
posted @ 2024-06-07 09:49  bonelee