# Agents can fail in fantastic ways, and stack traces are unfortunately not always helpful enough.
# Fortunately, Pydantic AI agents keep track of their messages, so as long as you save them you can review them to aid your debugging!
# This gist shows a simple `try_run` implementation which logs those messages on error.
# You can imagine any number of things to do with the messages: send them to Logfire, store them in a database, write them to a file, etc.
# Pro tip: these form a helpful subset of input/output data for refining your agent! Taking some time to store them
# appropriately for review (and possibly fine-tuning / use in prompts / etc. in the future) will pay off!!
# These are examples where your agent actually failed!
from __future__ import annotations
import asyncio
import logging
from pydantic import BaseModel, field_validator
from pydantic_ai import Agent
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
async def try_run(agent: Agent, *agent_args, **agent_kwargs):
    """Run an agent, catching and logging any exceptions.

    Useful for debugging as it logs the last run messages on failure.
    """
    try:
        return await agent.run(*agent_args, **agent_kwargs)
    except Exception:
        # Format messages nicely with indentation for readability
        messages_str = "\n".join(
            f"\t{msg}" for msg in agent.last_run_messages or []
        )
        logger.exception(f"Error running agent. Last run messages:\n{messages_str}")
        raise
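
# One way to act on the tip above: persist the messages somewhere durable instead of only
# logging them. This is just a sketch; `save_run_messages` and the `agent_failures.log` path
# are placeholder names, and you could swap in Logfire, a database, or any other store.
def save_run_messages(agent: Agent, log_path: str = "agent_failures.log") -> None:
    """Append the agent's last run messages to a local file for later review."""
    with open(log_path, "a") as f:
        for msg in agent.last_run_messages or []:
            # repr() avoids depending on any particular serialization API
            f.write(f"{msg!r}\n")
        f.write("---\n")  # separate runs for easier scanning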

# Create a result model whose validator lets us force a parsing error on demand
class Location(BaseModel):
    latitude: float  # Must be between -90 and 90
    longitude: float  # Must be between -180 and 180

    @field_validator("latitude")
    def validate_latitude(cls, v: float) -> float:
        if FORCE_ERROR:
            raise ValueError("Simulated parsing error")
        return v

# Set to True to force an error during parsing.
FORCE_ERROR = False
# Initialize agent with our complex model
agent = Agent("openai:gpt-4o-mini", result_type=Location)

async def main():
    """Run one successful example, then two runs where we force a validation error."""
    global FORCE_ERROR

    # Things are awesome when they 'just work'... But that's not always the case!
    print("Normal, successful run.")
    result = await agent.run("Describe Death Valley, California.")
    print(f"Result: {result.data}")

    FORCE_ERROR = True

    # Here, the error just gets printed out... Kind of hard to understand "why" the error happened, though.
    print("Normal, error run.")
    try:
        result = await agent.run("Describe Death Valley, California.")
        print(f"Result: {result.data}")
    except Exception as e:
        print(f"Error: {e}")

    # Here we get a full rendition of what happened and can trace through the steps!
    print("Try run helper, error run.")
    try:
        result = await try_run(agent, "Describe Death Valley, California.")
        print(f"Result: {result.data}")
    except Exception:
        print("See error message above.")


if __name__ == "__main__":
    asyncio.run(main())