A simple trial of pydantic ai integrated with Ollama
This is just a learning example to get a feel for pydantic ai; the LLM is served by Ollama.
Reference code
- app.py
A test that integrates with Ollama:
```python
import asyncio
from typing import Any

from openai import AsyncOpenAI
from pydantic import BaseModel, Field
from pydantic_ai import Agent, RunContext
from pydantic_ai.models.openai import OpenAIModel

# a tiny in-memory "user database"
USERS = {
    "dalong": 4,
}

# Ollama exposes an OpenAI-compatible API, so the regular AsyncOpenAI client works
client = AsyncOpenAI(api_key='demo', base_url="http://localhost:11434/v1")
model = OpenAIModel('qwen2.5:3b', openai_client=client)


class UserResult(BaseModel):
    age: int = Field(description='age of the user')


agent = Agent(
    model=model,
    result_retries=3,
)


@agent.system_prompt
async def add_customer_name(ctx: RunContext[Any]) -> str:
    customer_name = ctx.deps["user_name"]
    print("from system_prompt", customer_name)
    return f"姓名是 {customer_name!r}"  # "the user's name is ..."


@agent.tool
async def get_user_age(ctx: RunContext[Any]) -> int:
    """返回用户年龄 (return the user's age)"""
    user_age = USERS.get(ctx.deps["user_name"])
    print("from tools", user_age)
    return user_age


async def main():
    # "What is my age?" -- answering this forces the model to call the get_user_age tool
    result = await agent.run('我的年龄是多少?', deps={"user_name": "dalong"})
    # No result_type is set on the agent, so result.data is the plain text answer
    print(result.data)


if __name__ == "__main__":
    asyncio.run(main())
```
- Result
Note that with a model this small, function calling may not be very stable; see the sketch below for one way to make the result more robust.
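Since a small model can return loosely formatted answers, one option is to have pydantic_ai validate the reply into the `UserResult` model that app.py already defines; `result_retries` then re-asks the model when validation fails. A minimal sketch, assuming the same local Ollama setup, the same (pre-1.0) pydantic_ai API used above, and reusing `model`, `USERS`, `UserResult`, `RunContext`, and `Any` from app.py:

```python
# Sketch: reuse UserResult as the structured result type so the answer is
# validated (and retried up to result_retries times) instead of returned as free text.
typed_agent = Agent(
    model=model,                 # same Ollama-backed model as above
    result_type=UserResult,      # force the reply into {"age": <int>}
    result_retries=3,            # re-prompt when validation or the tool call fails
)


@typed_agent.system_prompt
async def add_name(ctx: RunContext[Any]) -> str:
    return f"姓名是 {ctx.deps['user_name']!r}"


@typed_agent.tool
async def get_age(ctx: RunContext[Any]) -> int:
    """返回用户年龄 (return the user's age)"""
    return USERS.get(ctx.deps["user_name"])


async def demo():
    result = await typed_agent.run('我的年龄是多少?', deps={"user_name": "dalong"})
    print(result.data)           # a validated UserResult instance
```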
Notes
Using tools requires a model that supports function calling; the latest Ollama release and a model with function-calling support are recommended.
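If you want to confirm that the model served by Ollama actually supports function calling before wiring it into pydantic_ai, one quick check is to send a minimal tool definition through the same OpenAI-compatible endpoint and see whether a tool call comes back. This is only a sketch; the `get_weather` tool and its schema are made up for the probe and are not part of app.py:

```python
import asyncio

from openai import AsyncOpenAI


async def check_tool_support():
    client = AsyncOpenAI(api_key='demo', base_url="http://localhost:11434/v1")
    resp = await client.chat.completions.create(
        model='qwen2.5:3b',
        messages=[{"role": "user", "content": "What is the weather in Beijing?"}],
        tools=[{
            "type": "function",
            "function": {
                "name": "get_weather",  # hypothetical tool, only used for this probe
                "description": "Get the current weather for a city",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }],
    )
    # A model with working function calling should answer with a tool call here;
    # None usually means the model ignored the tool definition.
    print(resp.choices[0].message.tool_calls)


asyncio.run(check_tool_support())
```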