pydantic_ai.low_level

Low-level methods to make requests directly to models with minimal abstraction.

These methods allow you to make requests to LLMs where the only abstraction is input and output schema translation, so you can use the same API to call any supported model.

These methods are thin wrappers around Model implementations.
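
Because the wrappers are so thin, anything accepted as a `Model` works in place of a model name string. A minimal sketch, assuming the OpenAI extra is installed and `OpenAIModel` is importable from `pydantic_ai.models.openai`:

```python
from pydantic_ai.low_level import model_request
from pydantic_ai.messages import ModelRequest
from pydantic_ai.models.openai import OpenAIModel  # assumes the `openai` extra is installed


async def main():
    # Passing a concrete Model instance instead of a 'provider:name' string.
    model = OpenAIModel('gpt-4o')
    response = await model_request(
        model,
        [ModelRequest.user_text_prompt('Hello!')],
    )
    print(response)
```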

LowLevelModelResponse dataclass

Bases: ModelResponse

Subclass of ModelResponse that includes usage information.

Source code in pydantic_ai_slim/pydantic_ai/low_level.py
@dataclasses.dataclass
class LowLevelModelResponse(messages.ModelResponse):
    """Subclass of [`ModelResponse`][pydantic_ai.messages.ModelResponse] that includes usage information."""

    # the default_factory is required here to pacify dataclasses since `ModelResponse` has fields with defaults :-(
    usage: usage.Usage = dataclasses.field(default_factory=usage.Usage)
    """Usage information for the request."""

usage class-attribute instance-attribute

usage: Usage = field(default_factory=Usage)

Usage information for the request.
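
A minimal sketch of reading token counts off a response, using the `Usage` fields shown in the example output below (`request_tokens`, `response_tokens`, `total_tokens`):

```python
from pydantic_ai.low_level import model_request
from pydantic_ai.messages import ModelRequest


async def main():
    response = await model_request(
        'anthropic:claude-3-5-haiku-latest',
        [ModelRequest.user_text_prompt('Hello!')],
    )
    # usage carries per-request token accounting.
    print(response.usage.request_tokens, response.usage.response_tokens)
    print(response.usage.total_tokens)
```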

model_request async

model_request(
    model: Model | KnownModelName | str,
    messages: list[ModelMessage],
    *,
    model_settings: ModelSettings | None = None,
    model_request_parameters: (
        ModelRequestParameters | None
    ) = None,
    instrument: InstrumentationSettings | bool | None = None
) -> LowLevelModelResponse

Make a non-streamed request to a model.

model_request_example.py
from pydantic_ai.low_level import model_request
from pydantic_ai.messages import ModelRequest


async def main():
    model_response = await model_request(
        'anthropic:claude-3-5-haiku-latest',
        [ModelRequest.user_text_prompt('What is the capital of France?')]  # (1)!
    )
    print(model_response)
    '''
    LowLevelModelResponse(
        parts=[TextPart(content='Paris', part_kind='text')],
        model_name='claude-3-5-haiku-latest',
        timestamp=datetime.datetime(...),
        kind='response',
        usage=Usage(
            requests=1,
            request_tokens=56,
            response_tokens=1,
            total_tokens=57,
            details=None,
        ),
    )
    '''
  1. See ModelRequest.user_text_prompt for details.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `model` | `Model \| KnownModelName \| str` | The model to make a request to. We allow `str` here since the actual list of allowed models changes frequently. | *required* |
| `messages` | `list[ModelMessage]` | Messages to send to the model. | *required* |
| `model_settings` | `ModelSettings \| None` | Optional model settings. | `None` |
| `model_request_parameters` | `ModelRequestParameters \| None` | Optional model request parameters. | `None` |
| `instrument` | `InstrumentationSettings \| bool \| None` | Whether to instrument the request with OpenTelemetry/Logfire; if `None`, the value from `logfire.instrument_pydantic_ai` is used. | `None` |
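
`model_settings` are passed through to the underlying `Model` unchanged. A sketch, assuming `ModelSettings` accepts a `max_tokens` key (illustrative; check the `ModelSettings` docs for the supported keys):

```python
from pydantic_ai.low_level import model_request
from pydantic_ai.messages import ModelRequest
from pydantic_ai.settings import ModelSettings


async def main():
    response = await model_request(
        'anthropic:claude-3-5-haiku-latest',
        [ModelRequest.user_text_prompt('What is the capital of France?')],
        # max_tokens is an illustrative setting key, not confirmed by this page.
        model_settings=ModelSettings(max_tokens=100),
    )
    print(response)
```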

Returns:

| Type | Description |
| --- | --- |
| `LowLevelModelResponse` | The model response and token usage associated with the request. |

Source code in pydantic_ai_slim/pydantic_ai/low_level.py
async def model_request(
    model: models.Model | models.KnownModelName | str,
    messages: list[messages.ModelMessage],
    *,
    model_settings: settings.ModelSettings | None = None,
    model_request_parameters: models.ModelRequestParameters | None = None,
    instrument: instrumented_models.InstrumentationSettings | bool | None = None,
) -> LowLevelModelResponse:
    """Make a non-streamed request to a model.

    ```py title="model_request_example.py"
    from pydantic_ai.low_level import model_request
    from pydantic_ai.messages import ModelRequest


    async def main():
        model_response = await model_request(
            'anthropic:claude-3-5-haiku-latest',
            [ModelRequest.user_text_prompt('What is the capital of France?')]  # (1)!
        )
        print(model_response)
        '''
        LowLevelModelResponse(
            parts=[TextPart(content='Paris', part_kind='text')],
            model_name='claude-3-5-haiku-latest',
            timestamp=datetime.datetime(...),
            kind='response',
            usage=Usage(
                requests=1,
                request_tokens=56,
                response_tokens=1,
                total_tokens=57,
                details=None,
            ),
        )
        '''
    ```

    1. See [`ModelRequest.user_text_prompt`][pydantic_ai.messages.ModelRequest.user_text_prompt] for details.

    Args:
        model: The model to make a request to. We allow `str` here since the actual list of allowed models changes frequently.
        messages: Messages to send to the model.
        model_settings: Optional model settings.
        model_request_parameters: Optional model request parameters.
        instrument: Whether to instrument the request with OpenTelemetry/logfire, if `None` the value from
            [`logfire.instrument_pydantic_ai`][logfire.Logfire.instrument_pydantic_ai] is used.

    Returns:
        The model response and token usage associated with the request.
    """
    model_instance = _prepare_model(model, instrument)
    model_response, usage = await model_instance.request(
        messages,
        model_settings,
        model_instance.customize_request_parameters(model_request_parameters or models.ModelRequestParameters()),
    )
    usage.requests += 1
    return LowLevelModelResponse(
        parts=model_response.parts,
        model_name=model_response.model_name,
        timestamp=model_response.timestamp,
        usage=usage,
    )

model_request_sync

model_request_sync(
    model: Model | KnownModelName | str,
    messages: list[ModelMessage],
    *,
    model_settings: ModelSettings | None = None,
    model_request_parameters: (
        ModelRequestParameters | None
    ) = None,
    instrument: InstrumentationSettings | bool | None = None
) -> LowLevelModelResponse

Make a synchronous, non-streamed request to a model.

This is a convenience method that wraps model_request with loop.run_until_complete(...). You therefore can't use this method inside async code or if there's an active event loop.

model_request_sync_example.py
from pydantic_ai.low_level import model_request_sync
from pydantic_ai.messages import ModelRequest

model_response = model_request_sync(
    'anthropic:claude-3-5-haiku-latest',
    [ModelRequest.user_text_prompt('What is the capital of France?')]
)
print(model_response)
'''
LowLevelModelResponse(
    parts=[TextPart(content='Paris', part_kind='text')],
    model_name='claude-3-5-haiku-latest',
    timestamp=datetime.datetime(...),
    kind='response',
    usage=Usage(
        requests=1, request_tokens=56, response_tokens=1, total_tokens=57, details=None
    ),
)
'''
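
Because `model_request_sync` drives the event loop itself, call the awaitable `model_request` from async code instead; a minimal sketch of a plain-script entry point:

```python
import asyncio

from pydantic_ai.low_level import model_request
from pydantic_ai.messages import ModelRequest


async def main():
    # Inside async code use model_request; model_request_sync would fail here
    # because the event loop is already running.
    response = await model_request(
        'anthropic:claude-3-5-haiku-latest',
        [ModelRequest.user_text_prompt('What is the capital of France?')],
    )
    print(response)


if __name__ == '__main__':
    asyncio.run(main())
```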

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `model` | `Model \| KnownModelName \| str` | The model to make a request to. We allow `str` here since the actual list of allowed models changes frequently. | *required* |
| `messages` | `list[ModelMessage]` | Messages to send to the model. | *required* |
| `model_settings` | `ModelSettings \| None` | Optional model settings. | `None` |
| `model_request_parameters` | `ModelRequestParameters \| None` | Optional model request parameters. | `None` |
| `instrument` | `InstrumentationSettings \| bool \| None` | Whether to instrument the request with OpenTelemetry/Logfire; if `None`, the value from `logfire.instrument_pydantic_ai` is used. | `None` |

Returns:

| Type | Description |
| --- | --- |
| `LowLevelModelResponse` | The model response and token usage associated with the request. |

Source code in pydantic_ai_slim/pydantic_ai/low_level.py
def model_request_sync(
    model: models.Model | models.KnownModelName | str,
    messages: list[messages.ModelMessage],
    *,
    model_settings: settings.ModelSettings | None = None,
    model_request_parameters: models.ModelRequestParameters | None = None,
    instrument: instrumented_models.InstrumentationSettings | bool | None = None,
) -> LowLevelModelResponse:
    """Make a Synchronous, non-streamed request to a model.

    This is a convenience method that wraps [`model_request`][pydantic_ai.low_level.model_request] with
    `loop.run_until_complete(...)`. You therefore can't use this method inside async code or if there's an active event loop.

    ```py title="model_request_sync_example.py"
    from pydantic_ai.low_level import model_request_sync
    from pydantic_ai.messages import ModelRequest

    model_response = model_request_sync(
        'anthropic:claude-3-5-haiku-latest',
        [ModelRequest.user_text_prompt('What is the capital of France?')]
    )
    print(model_response)
    '''
    LowLevelModelResponse(
        parts=[TextPart(content='Paris', part_kind='text')],
        model_name='claude-3-5-haiku-latest',
        timestamp=datetime.datetime(...),
        kind='response',
        usage=Usage(
            requests=1, request_tokens=56, response_tokens=1, total_tokens=57, details=None
        ),
    )
    '''
    ```

    Args:
        model: The model to make a request to. We allow `str` here since the actual list of allowed models changes frequently.
        messages: Messages to send to the model.
        model_settings: Optional model settings.
        model_request_parameters: Optional model request parameters.
        instrument: Whether to instrument the request with OpenTelemetry/logfire, if `None` the value from
            [`logfire.instrument_pydantic_ai`][logfire.Logfire.instrument_pydantic_ai] is used.

    Returns:
        The model response and token usage associated with the request.
    """
    return _get_event_loop().run_until_complete(
        model_request(
            model,
            messages,
            model_settings=model_settings,
            model_request_parameters=model_request_parameters,
            instrument=instrument,
        )
    )

model_request_stream async

model_request_stream(
    model: Model | KnownModelName | str,
    messages: list[ModelMessage],
    *,
    model_settings: ModelSettings | None = None,
    model_request_parameters: (
        ModelRequestParameters | None
    ) = None,
    instrument: InstrumentationSettings | bool | None = None
) -> AsyncIterator[StreamedResponse]

Make a streamed async request to a model.

model_request_stream_example.py
from pydantic_ai.low_level import model_request_stream
from pydantic_ai.messages import ModelRequest


async def main():
    messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]
    async with model_request_stream('openai:gpt-4.1-mini', messages) as stream:
        chunks = []
        async for chunk in stream:
            chunks.append(chunk)
        print(chunks)
        '''
        [
            PartStartEvent(
                index=0,
                part=TextPart(content='Albert Einstein was ', part_kind='text'),
                event_kind='part_start',
            ),
            PartDeltaEvent(
                index=0,
                delta=TextPartDelta(
                    content_delta='a German-born theoretical ', part_delta_kind='text'
                ),
                event_kind='part_delta',
            ),
            PartDeltaEvent(
                index=0,
                delta=TextPartDelta(content_delta='physicist.', part_delta_kind='text'),
                event_kind='part_delta',
            ),
        ]
        '''
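
To reassemble the streamed text, match on the event types shown above. A sketch, assuming `PartStartEvent`, `PartDeltaEvent`, `TextPart`, and `TextPartDelta` are importable from `pydantic_ai.messages` as their reprs suggest:

```python
from pydantic_ai.low_level import model_request_stream
from pydantic_ai.messages import (
    ModelRequest,
    PartDeltaEvent,
    PartStartEvent,
    TextPart,
    TextPartDelta,
)


async def main():
    messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]
    text = ''
    async with model_request_stream('openai:gpt-4.1-mini', messages) as stream:
        async for event in stream:
            # A start event carries the initial part; delta events extend it.
            if isinstance(event, PartStartEvent) and isinstance(event.part, TextPart):
                text += event.part.content
            elif isinstance(event, PartDeltaEvent) and isinstance(
                event.delta, TextPartDelta
            ):
                text += event.delta.content_delta
    print(text)
```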

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `model` | `Model \| KnownModelName \| str` | The model to make a request to. We allow `str` here since the actual list of allowed models changes frequently. | *required* |
| `messages` | `list[ModelMessage]` | Messages to send to the model. | *required* |
| `model_settings` | `ModelSettings \| None` | Optional model settings. | `None` |
| `model_request_parameters` | `ModelRequestParameters \| None` | Optional model request parameters. | `None` |
| `instrument` | `InstrumentationSettings \| bool \| None` | Whether to instrument the request with OpenTelemetry/Logfire; if `None`, the value from `logfire.instrument_pydantic_ai` is used. | `None` |

Returns:

| Type | Description |
| --- | --- |
| `AsyncIterator[StreamedResponse]` | A stream response async context manager. |

Source code in pydantic_ai_slim/pydantic_ai/low_level.py
@asynccontextmanager
async def model_request_stream(
    model: models.Model | models.KnownModelName | str,
    messages: list[messages.ModelMessage],
    *,
    model_settings: settings.ModelSettings | None = None,
    model_request_parameters: models.ModelRequestParameters | None = None,
    instrument: instrumented_models.InstrumentationSettings | bool | None = None,
) -> AsyncIterator[models.StreamedResponse]:
    """Make a streamed async request to a model.

    ```py title="model_request_stream_example.py"
    from pydantic_ai.low_level import model_request_stream
    from pydantic_ai.messages import ModelRequest


    async def main():
        messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]
        async with model_request_stream('openai:gpt-4.1-mini', messages) as stream:
            chunks = []
            async for chunk in stream:
                chunks.append(chunk)
            print(chunks)
            '''
            [
                PartStartEvent(
                    index=0,
                    part=TextPart(content='Albert Einstein was ', part_kind='text'),
                    event_kind='part_start',
                ),
                PartDeltaEvent(
                    index=0,
                    delta=TextPartDelta(
                        content_delta='a German-born theoretical ', part_delta_kind='text'
                    ),
                    event_kind='part_delta',
                ),
                PartDeltaEvent(
                    index=0,
                    delta=TextPartDelta(content_delta='physicist.', part_delta_kind='text'),
                    event_kind='part_delta',
                ),
            ]
            '''
    ```

    Args:
        model: The model to make a request to. We allow `str` here since the actual list of allowed models changes frequently.
        messages: Messages to send to the model.
        model_settings: Optional model settings.
        model_request_parameters: Optional model request parameters.
        instrument: Whether to instrument the request with OpenTelemetry/logfire, if `None` the value from
            [`logfire.instrument_pydantic_ai`][logfire.Logfire.instrument_pydantic_ai] is used.

    Returns:
        A [stream response][pydantic_ai.models.StreamedResponse] async context manager.
    """
    model_instance = _prepare_model(model, instrument)
    stream_cxt_mgr = model_instance.request_stream(
        messages,
        model_settings,
        model_instance.customize_request_parameters(model_request_parameters or models.ModelRequestParameters()),
    )
    async with stream_cxt_mgr as streamed_response:
        yield streamed_response