diff --git a/instructor/process_response.py b/instructor/process_response.py
index ba070f952..65c5fd223 100644
--- a/instructor/process_response.py
+++ b/instructor/process_response.py
@@ -10,7 +10,6 @@
 from instructor.dsl.simple_type import AdapterBase, ModelAdapter, is_simple_type
 from instructor.function_calls import OpenAISchema, openai_schema
 from instructor.utils import merge_consecutive_messages
-from instructor.validators import AsyncValidationError
 from openai.types.chat import ChatCompletion
 from pydantic import BaseModel, create_model
 import json
@@ -81,11 +80,6 @@ async def process_response_async(
         mode=mode,
     )
 
-    if isinstance(model, OpenAISchema):
-        validation_errors = await model.model_async_validate(validation_context)
-        if validation_errors:
-            raise AsyncValidationError(f"Validation errors: {validation_errors}")
-
     # ? This really hints at the fact that we need a better way of
     # ? attaching usage data and the raw response to the model we return.
     if isinstance(model, IterableBase):
@@ -153,12 +147,6 @@ def process_response(
         mode=mode,
     )
 
-    if isinstance(model, OpenAISchema):
-        if model.has_async_validators():
-            logging.warning(
-                "Async Validators will not run in a synchronous client. Please make sure to use an Async client"
-            )
-
     # ? This really hints at the fact that we need a better way of
     # ? attaching usage data and the raw response to the model we return.
     if isinstance(model, IterableBase):