Fixed up requested changes by renaming the mode and changing the parsing code itself
ivanleomk committed Aug 21, 2024
1 parent 2774a17 commit ea1509b
Showing 10 changed files with 15 additions and 14 deletions.
2 changes: 1 addition & 1 deletion docs/index.md
@@ -62,7 +62,7 @@ Now, let's see Instructor in action with a simple example:
from instructor import from_openai, Mode
from pydantic import BaseModel

- client = from_openai(OpenAI(), mode=Mode.STRUCTURED_OUTPUTS)
+ client = from_openai(OpenAI(), mode=Mode.TOOLS_STRICT)


class User(BaseModel):
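For reference, the renamed mode slots into the existing quickstart unchanged apart from the enum member; a minimal end-to-end sketch (model name and prompt are illustrative, not part of this diff):

```python
# Minimal sketch of the updated quickstart; assumes openai and instructor are
# installed and OPENAI_API_KEY is set. Model name and prompt are illustrative.
from openai import OpenAI
from pydantic import BaseModel

from instructor import Mode, from_openai


class User(BaseModel):
    name: str
    age: int


# TOOLS_STRICT asks OpenAI for strict, schema-constrained tool calls.
client = from_openai(OpenAI(), mode=Mode.TOOLS_STRICT)

user = client.chat.completions.create(
    model="gpt-4o-mini",
    response_model=User,
    messages=[{"role": "user", "content": "Jason is 25 years old"}],
)
print(user)  # e.g. User(name='Jason', age=25)
```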
2 changes: 1 addition & 1 deletion instructor/client.py
@@ -425,7 +425,7 @@ def from_openai(
instructor.Mode.FUNCTIONS,
instructor.Mode.PARALLEL_TOOLS,
instructor.Mode.MD_JSON,
- instructor.Mode.STRUCTURED_OUTPUTS,
+ instructor.Mode.TOOLS_STRICT,
}

if isinstance(client, openai.OpenAI):
4 changes: 2 additions & 2 deletions instructor/dsl/iterable.py
@@ -94,7 +94,7 @@ def extract_json(
elif mode in {Mode.JSON, Mode.MD_JSON, Mode.JSON_SCHEMA}:
if json_chunk := chunk.choices[0].delta.content:
yield json_chunk
- elif mode == Mode.TOOLS or mode == Mode.STRUCTURED_OUTPUTS:
+ elif mode in {Mode.TOOLS, Mode.TOOLS_STRICT}:
if json_chunk := chunk.choices[0].delta.tool_calls:
yield json_chunk[0].function.arguments
else:
@@ -123,7 +123,7 @@ async def extract_json_async(
elif mode in {Mode.JSON, Mode.MD_JSON, Mode.JSON_SCHEMA}:
if json_chunk := chunk.choices[0].delta.content:
yield json_chunk
- elif mode == Mode.TOOLS or mode == Mode.STRUCTURED_OUTPUTS:
+ elif mode in {Mode.TOOLS, Mode.TOOLS_STRICT}:
if json_chunk := chunk.choices[0].delta.tool_calls:
yield json_chunk[0].function.arguments
else:
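The streaming branch changed here feeds Iterable extraction; a sketch of how it surfaces to users, assuming the create_iterable helper is available on the patched client (model name and prompt are illustrative):

```python
# Sketch only: assumes instructor's create_iterable helper and an OpenAI key
# in the environment; model name and prompt are illustrative.
from openai import OpenAI
from pydantic import BaseModel

import instructor


class User(BaseModel):
    name: str
    age: int


client = instructor.from_openai(OpenAI(), mode=instructor.Mode.TOOLS_STRICT)

# Each User is parsed from the streamed tool_calls deltas that extract_json
# now yields for both TOOLS and TOOLS_STRICT.
users = client.chat.completions.create_iterable(
    model="gpt-4o-mini",
    response_model=User,
    messages=[{"role": "user", "content": "Jason is 25 and Sarah is 30"}],
)
for user in users:
    print(user)
```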
4 changes: 2 additions & 2 deletions instructor/dsl/partial.py
@@ -170,7 +170,7 @@ def extract_json(
elif mode in {Mode.JSON, Mode.MD_JSON, Mode.JSON_SCHEMA}:
if json_chunk := chunk.choices[0].delta.content:
yield json_chunk
- elif mode == Mode.TOOLS or mode == Mode.STRUCTURED_OUTPUTS:
+ elif mode in {Mode.TOOLS, Mode.TOOLS_STRICT}:
if json_chunk := chunk.choices[0].delta.tool_calls:
yield json_chunk[0].function.arguments
else:
@@ -199,7 +199,7 @@ async def extract_json_async(
elif mode in {Mode.JSON, Mode.MD_JSON, Mode.JSON_SCHEMA}:
if json_chunk := chunk.choices[0].delta.content:
yield json_chunk
- elif mode == Mode.TOOLS or mode == Mode.STRUCTURED_OUTPUTS:
+ elif mode in {Mode.TOOLS, Mode.TOOLS_STRICT}:
if json_chunk := chunk.choices[0].delta.tool_calls:
yield json_chunk[0].function.arguments
else:
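The same change in partial.py backs partial streaming; a sketch assuming the create_partial helper (again, model name and prompt are illustrative):

```python
# Sketch only: assumes instructor's create_partial helper; each yielded object
# is a partially populated User built from streamed tool_calls arguments.
from openai import OpenAI
from pydantic import BaseModel

import instructor


class User(BaseModel):
    name: str
    age: int


client = instructor.from_openai(OpenAI(), mode=instructor.Mode.TOOLS_STRICT)

for partial_user in client.chat.completions.create_partial(
    model="gpt-4o-mini",
    response_model=User,
    messages=[{"role": "user", "content": "Jason is 25 years old"}],
):
    print(partial_user)  # fields fill in as more of the tool call streams
```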
7 changes: 4 additions & 3 deletions instructor/function_calls.py
@@ -264,7 +264,7 @@ def from_response(
Mode.warn_mode_functions_deprecation()
return cls.parse_functions(completion, validation_context, strict)

- if mode in {Mode.TOOLS, Mode.MISTRAL_TOOLS, Mode.STRUCTURED_OUTPUTS}:
+ if mode in {Mode.TOOLS, Mode.MISTRAL_TOOLS, Mode.TOOLS_STRICT}:
return cls.parse_tools(completion, validation_context, strict)

if mode in {Mode.JSON, Mode.JSON_SCHEMA, Mode.MD_JSON}:
@@ -294,7 +294,8 @@ def parse_anthropic_tools(
strict: Optional[bool] = None,
) -> BaseModel:
from anthropic.types import Message
- if isinstance(completion, Message) and completion.stop_reason == 'max_tokens':
+
+ if isinstance(completion, Message) and completion.stop_reason == "max_tokens":
raise IncompleteOutputException(last_completion=completion)

# Anthropic returns arguments as a dict, dump to json for model validation below
@@ -322,7 +323,7 @@ def parse_anthropic_json(

assert isinstance(completion, Message)

- if completion.stop_reason == 'max_tokens':
+ if completion.stop_reason == "max_tokens":
raise IncompleteOutputException(last_completion=completion)

text = completion.content[0].text
2 changes: 1 addition & 1 deletion instructor/mode.py
@@ -19,7 +19,7 @@ class Mode(enum.Enum):
VERTEXAI_JSON = "vertexai_json"
GEMINI_JSON = "gemini_json"
COHERE_JSON_SCHEMA = "json_object"
- STRUCTURED_OUTPUTS = "structured_output"
+ TOOLS_STRICT = "tools_strict"

@classmethod
def warn_mode_functions_deprecation(cls):
2 changes: 1 addition & 1 deletion instructor/process_response.py
@@ -250,7 +250,7 @@ def handle_response_model(
Mode.warn_mode_functions_deprecation()
new_kwargs["functions"] = [response_model.openai_schema]
new_kwargs["function_call"] = {"name": response_model.openai_schema["name"]}
- elif mode in {Mode.STRUCTURED_OUTPUTS}:
+ elif mode == Mode.TOOLS_STRICT:
response_model_schema = pydantic_function_tool(response_model)
response_model_schema["function"]["strict"] = True
new_kwargs["tools"] = [response_model_schema]
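To illustrate what the new TOOLS_STRICT branch sends to the API, here is a sketch of the tool payload it builds, assuming an openai version that exports pydantic_function_tool (1.40+):

```python
# Illustrative only: mirrors the branch above by building a strict tool schema
# from a Pydantic model. Assumes openai >= 1.40, which exports pydantic_function_tool.
from openai import pydantic_function_tool
from pydantic import BaseModel


class User(BaseModel):
    name: str
    age: int


tool = pydantic_function_tool(User)
tool["function"]["strict"] = True  # set explicitly, as handle_response_model does

# Passing tools=[tool] (together with a tool_choice forcing this function, as
# instructor does for its tools modes) enables OpenAI's strict validation of
# the generated arguments against the schema.
print(tool["type"])                # "function"
print(tool["function"]["name"])    # "User"
print(tool["function"]["strict"])  # True
```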
2 changes: 1 addition & 1 deletion instructor/retry.py
@@ -108,7 +108,7 @@ def reask_messages(response: ChatCompletion, mode: Mode, exception: Exception):

yield dump_message(response.choices[0].message)
# TODO: Give users more control on configuration
- if mode == Mode.TOOLS or mode == Mode.STRUCTURED_OUTPUTS:
+ if mode in {Mode.TOOLS, Mode.TOOLS_STRICT}:
for tool_call in response.choices[0].message.tool_calls:
yield {
"role": "tool",
2 changes: 1 addition & 1 deletion tests/llm/test_openai/test_validators.py
@@ -24,7 +24,7 @@ class Response(BaseModel):
def test_runmodel_validator_error(model, mode, client):
client = instructor.from_openai(client, mode=mode)

- if mode == instructor.Mode.STRUCTURED_OUTPUTS:
+ if mode == instructor.Mode.TOOLS_STRICT:
# TODO: Structured outputs currently doesn't support the concept of Validators ( This is Pydantic specific ) so perhaps come back to this later
pytest.skip("Skipping test for structured output")

2 changes: 1 addition & 1 deletion tests/llm/test_openai/util.py
@@ -3,5 +3,5 @@
models = ["gpt-4o-mini"]
modes = [
instructor.Mode.TOOLS,
- instructor.Mode.STRUCTURED_OUTPUTS,
+ instructor.Mode.TOOLS_STRICT,
]
