Model interface - OpenAI Agents SDK

Bases: ABC

The base interface for calling an LLM.

Source code in src/agents/models/interface.py
class Model(abc.ABC):
    """The base interface for calling an LLM.

    Concrete implementations translate agent inputs into provider-specific
    requests and return results in OpenAI Responses format.
    """

    async def close(self) -> None:
        """Release any resources held by the model.

        Models that maintain persistent connections can override this. The default implementation
        is a no-op.
        """
        return None

    @abc.abstractmethod
    async def get_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        *,
        previous_response_id: str | None,
        conversation_id: str | None,
        prompt: ResponsePromptParam | None,
    ) -> ModelResponse:
        """Get a response from the model.

        Args:
            system_instructions: The system instructions to use.
            input: The input items to the model, in OpenAI Responses format.
            model_settings: The model settings to use.
            tools: The tools available to the model.
            output_schema: The output schema to use.
            handoffs: The handoffs available to the model.
            tracing: Tracing configuration.
            previous_response_id: The ID of the previous response. Generally not used by the model,
                except for the OpenAI Responses API.
            conversation_id: The ID of the stored conversation, if any.
            prompt: The prompt config to use for the model.

        Returns:
            The full model response.
        """
        pass

    @abc.abstractmethod
    def stream_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        *,
        previous_response_id: str | None,
        conversation_id: str | None,
        prompt: ResponsePromptParam | None,
    ) -> AsyncIterator[TResponseStreamEvent]:
        """Stream a response from the model.

        Args:
            system_instructions: The system instructions to use.
            input: The input items to the model, in OpenAI Responses format.
            model_settings: The model settings to use.
            tools: The tools available to the model.
            output_schema: The output schema to use.
            handoffs: The handoffs available to the model.
            tracing: Tracing configuration.
            previous_response_id: The ID of the previous response. Generally not used by the model,
                except for the OpenAI Responses API.
            conversation_id: The ID of the stored conversation, if any.
            prompt: The prompt config to use for the model.

        Returns:
            An iterator of response stream events, in OpenAI Responses format.
        """
        pass

close async

Release any resources held by the model.

Models that maintain persistent connections can override this. The default implementation is a no-op.

Source code in src/agents/models/interface.py
async def close(self) -> None:
    """Free any resources this model instance is holding.

    Subclasses that keep long-lived connections (e.g. HTTP sessions)
    should override this; the base implementation does nothing.
    """
    return None

get_response abstractmethod async

get_response(
    system_instructions: str | None,
    input: str | list[TResponseInputItem],
    model_settings: ModelSettings,
    tools: list[Tool],
    output_schema: AgentOutputSchemaBase | None,
    handoffs: list[Handoff],
    tracing: ModelTracing,
    *,
    previous_response_id: str | None,
    conversation_id: str | None,
    prompt: ResponsePromptParam | None,
) -> ModelResponse

Get a response from the model.

Parameters:

Name Type Description Default
system_instructions str | None

The system instructions to use.

required
input str | list[TResponseInputItem]

The input items to the model, in OpenAI Responses format.

required
model_settings ModelSettings

The model settings to use.

required
tools list[Tool]

The tools available to the model.

required
output_schema AgentOutputSchemaBase | None

The output schema to use.

required
handoffs list[Handoff]

The handoffs available to the model.

required
tracing ModelTracing

Tracing configuration.

required
previous_response_id str | None

The ID of the previous response. Generally not used by the model, except for the OpenAI Responses API.

required
conversation_id str | None

The ID of the stored conversation, if any.

required
prompt ResponsePromptParam | None

The prompt config to use for the model.

required

Returns:

Type Description
ModelResponse

The full model response.

Source code in src/agents/models/interface.py
@abc.abstractmethod
async def get_response(
    self,
    system_instructions: str | None,
    input: str | list[TResponseInputItem],
    model_settings: ModelSettings,
    tools: list[Tool],
    output_schema: AgentOutputSchemaBase | None,
    handoffs: list[Handoff],
    tracing: ModelTracing,
    *,
    previous_response_id: str | None,
    conversation_id: str | None,
    prompt: ResponsePromptParam | None,
) -> ModelResponse:
    """Get a response from the model.

    Args:
        system_instructions: The system instructions to use.
        input: The input items to the model, in OpenAI Responses format.
        model_settings: The model settings to use.
        tools: The tools available to the model.
        output_schema: The output schema to use.
        handoffs: The handoffs available to the model.
        tracing: Tracing configuration.
        previous_response_id: The ID of the previous response. Generally not used by the model,
            except for the OpenAI Responses API.
        conversation_id: The ID of the stored conversation, if any.
        prompt: The prompt config to use for the model.

    Returns:
        The full model response.
    """
    pass

stream_response abstractmethod

stream_response(
    system_instructions: str | None,
    input: str | list[TResponseInputItem],
    model_settings: ModelSettings,
    tools: list[Tool],
    output_schema: AgentOutputSchemaBase | None,
    handoffs: list[Handoff],
    tracing: ModelTracing,
    *,
    previous_response_id: str | None,
    conversation_id: str | None,
    prompt: ResponsePromptParam | None,
) -> AsyncIterator[TResponseStreamEvent]

Stream a response from the model.

Parameters:

Name Type Description Default
system_instructions str | None

The system instructions to use.

required
input str | list[TResponseInputItem]

The input items to the model, in OpenAI Responses format.

required
model_settings ModelSettings

The model settings to use.

required
tools list[Tool]

The tools available to the model.

required
output_schema AgentOutputSchemaBase | None

The output schema to use.

required
handoffs list[Handoff]

The handoffs available to the model.

required
tracing ModelTracing

Tracing configuration.

required
previous_response_id str | None

The ID of the previous response. Generally not used by the model, except for the OpenAI Responses API.

required
conversation_id str | None

The ID of the stored conversation, if any.

required
prompt ResponsePromptParam | None

The prompt config to use for the model.

required

Returns:

Type Description
AsyncIterator[TResponseStreamEvent]

An iterator of response stream events, in OpenAI Responses format.

Source code in src/agents/models/interface.py
@abc.abstractmethod
def stream_response(
    self,
    system_instructions: str | None,
    input: str | list[TResponseInputItem],
    model_settings: ModelSettings,
    tools: list[Tool],
    output_schema: AgentOutputSchemaBase | None,
    handoffs: list[Handoff],
    tracing: ModelTracing,
    *,
    previous_response_id: str | None,
    conversation_id: str | None,
    prompt: ResponsePromptParam | None,
) -> AsyncIterator[TResponseStreamEvent]:
    """Stream a response from the model.

    Args:
        system_instructions: The system instructions to use.
        input: The input items to the model, in OpenAI Responses format.
        model_settings: The model settings to use.
        tools: The tools available to the model.
        output_schema: The output schema to use.
        handoffs: The handoffs available to the model.
        tracing: Tracing configuration.
        previous_response_id: The ID of the previous response. Generally not used by the model,
            except for the OpenAI Responses API.
        conversation_id: The ID of the stored conversation, if any.
        prompt: The prompt config to use for the model.

    Returns:
        An iterator of response stream events, in OpenAI Responses format.
    """
    pass