Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
99 changes: 96 additions & 3 deletions sentry_sdk/integrations/pydantic_ai/__init__.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
from sentry_sdk.integrations import DidNotEnable, Integration
import functools

from sentry_sdk.integrations import DidNotEnable, Integration

try:
import pydantic_ai # type: ignore # noqa: F401
from pydantic_ai import Agent
except ImportError:
raise DidNotEnable("pydantic-ai not installed")

Expand All @@ -14,10 +16,20 @@
_patch_tool_execution,
)

from .spans.ai_client import ai_client_span, update_ai_client_span

from typing import TYPE_CHECKING

if TYPE_CHECKING:
from typing import Any
from pydantic_ai import ModelRequestContext, RunContext
from pydantic_ai.messages import ModelResponse # type: ignore


class PydanticAIIntegration(Integration):
identifier = "pydantic_ai"
origin = f"auto.ai.{identifier}"
are_request_hooks_available = True

def __init__(
self, include_prompts: bool = True, handled_tool_call_exceptions: bool = True
Expand Down Expand Up @@ -45,6 +57,87 @@
- Tool executions
"""
_patch_agent_run()
_patch_graph_nodes()
_patch_model_request()

try:
from pydantic_ai.capabilities import Hooks # type: ignore
except ImportError:
Hooks = None
PydanticAIIntegration.are_request_hooks_available = False

if Hooks is None:
_patch_graph_nodes()
_patch_model_request()
return

_patch_tool_execution()

# Assumptions:
# - Model requests within a run are sequential.
# - ctx.metadata is a shared dict instance between hooks.
hooks = Hooks()

@hooks.on.before_model_request  # type: ignore
async def on_request(
    ctx: "RunContext[None]", request_context: "ModelRequestContext"
) -> "ModelRequestContext":
    """Open an ai-client span for this model request and stash it in run metadata."""
    new_span = ai_client_span(
        messages=request_context.messages,
        agent=None,
        model=request_context.model,
        model_settings=request_context.model_settings,
    )
    # ctx.metadata is assumed to be a dict shared between hooks of one run —
    # TODO confirm against pydantic-ai's RunContext contract.
    metadata = ctx.metadata
    if isinstance(metadata, dict):
        metadata["_sentry_span"] = new_span

    # Entered manually; the matching __exit__ happens in the after/error hooks.
    new_span.__enter__()

    return request_context

@hooks.on.after_model_request  # type: ignore
async def on_response(
    ctx: "RunContext[None]",
    *,
    request_context: "ModelRequestContext",
    response: "ModelResponse",
) -> "ModelResponse":
    """Finalize and close the ai-client span opened by ``on_request``.

    Uses ``dict.pop`` with a default so a missing ``"_sentry_span"`` key
    (race, external metadata mutation, unexpected hook ordering) can never
    raise ``KeyError`` and disrupt the agent run — mirroring the safe
    access pattern already used in ``on_error``.
    """
    run_context_metadata = ctx.metadata
    if not isinstance(run_context_metadata, dict):
        return response

    # pop() both retrieves and removes the span in one step; direct
    # [] access here could crash the hook when the key is absent.
    span = run_context_metadata.pop("_sentry_span", None)
    if span is None:
        return response

    update_ai_client_span(span, response)
    span.__exit__(None, None, None)

    return response

@hooks.on.model_request_error  # type: ignore
async def on_error(
    ctx: "RunContext[None]",
    *,
    request_context: "ModelRequestContext",
    error: "Exception",
) -> "ModelResponse":
    """Close the in-flight ai-client span with error details, then re-raise."""
    metadata = ctx.metadata
    # pop with a default: safe even if no span was stored for this request.
    span = metadata.pop("_sentry_span", None) if isinstance(metadata, dict) else None
    if span is not None:
        span.__exit__(type(error), error, error.__traceback__)
    raise error

original_init = Agent.__init__

@functools.wraps(original_init)
def patched_init(
    self: "Agent[Any, Any]", *args: "Any", **kwargs: "Any"
) -> None:
    """Append the Sentry hooks to every Agent's capabilities at construction."""
    # Build a fresh list so a caller-supplied capabilities sequence is
    # never mutated in place; None/empty falls back to just our hooks.
    kwargs["capabilities"] = [*(kwargs.get("capabilities") or []), hooks]
    return original_init(self, *args, **kwargs)

Agent.__init__ = patched_init
16 changes: 16 additions & 0 deletions sentry_sdk/integrations/pydantic_ai/patches/agent_run.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,9 @@ def _create_run_wrapper(
original_func: The original run method
is_streaming: Whether this is a streaming method (for future use)
"""
from sentry_sdk.integrations.pydantic_ai import (
PydanticAIIntegration,
) # Required to avoid circular import

@wraps(original_func)
async def wrapper(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
Expand All @@ -107,6 +110,11 @@ async def wrapper(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
model = kwargs.get("model")
model_settings = kwargs.get("model_settings")

if PydanticAIIntegration.are_request_hooks_available:
metadata = kwargs.get("metadata")
if not metadata:
kwargs["metadata"] = {"_sentry_span": None}

# Create invoke_agent span
with invoke_agent_span(
user_prompt, self, model, model_settings, is_streaming
Expand Down Expand Up @@ -140,6 +148,9 @@ def _create_streaming_wrapper(
"""
Wraps run_stream method that returns an async context manager.
"""
from sentry_sdk.integrations.pydantic_ai import (
PydanticAIIntegration,
) # Required to avoid circular import

@wraps(original_func)
def wrapper(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
Expand All @@ -148,6 +159,11 @@ def wrapper(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
model = kwargs.get("model")
model_settings = kwargs.get("model_settings")

if PydanticAIIntegration.are_request_hooks_available:
metadata = kwargs.get("metadata")
if not metadata:
kwargs["metadata"] = {"_sentry_span": None}

# Call original function to get the context manager
original_ctx_manager = original_func(self, *args, **kwargs)

Expand Down
Loading
Loading