diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 7991b3e7c7..8067386d5f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -17,6 +17,7 @@ jobs:
     timeout-minutes: 10
     name: lint
     runs-on: ${{ github.repository == 'stainless-sdks/openai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+    if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
     steps:
       - uses: actions/checkout@v4
 
@@ -34,10 +35,10 @@ jobs:
       - name: Run lints
         run: ./scripts/lint
 
-  upload:
-    if: github.repository == 'stainless-sdks/openai-python'
+  build:
+    if: github.repository == 'stainless-sdks/openai-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork)
     timeout-minutes: 10
-    name: upload
+    name: build
     permissions:
       contents: read
       id-token: write
@@ -45,6 +46,20 @@
     steps:
       - uses: actions/checkout@v4
 
+      - name: Install Rye
+        run: |
+          curl -sSf https://rye.astral.sh/get | bash
+          echo "$HOME/.rye/shims" >> $GITHUB_PATH
+        env:
+          RYE_VERSION: '0.44.0'
+          RYE_INSTALL_OPTION: '--yes'
+
+      - name: Install dependencies
+        run: rye sync --all-features
+
+      - name: Run build
+        run: rye build
+
       - name: Get GitHub OIDC Token
         id: github-oidc
         uses: actions/github-script@v6
@@ -62,6 +77,7 @@
     timeout-minutes: 10
     name: test
     runs-on: ${{ github.repository == 'stainless-sdks/openai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+    if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
     steps:
       - uses: actions/checkout@v4
 
@@ -83,7 +99,7 @@
     timeout-minutes: 10
     name: examples
     runs-on: ${{ github.repository == 'stainless-sdks/openai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
-    if: github.repository == 'openai/openai-python'
+    if: github.repository == 'openai/openai-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork)
     steps:
       - uses: actions/checkout@v4
 
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 3ceb8e2f5b..daa7a2a062 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.93.0"
+  ".": "1.93.1"
 }
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3274b67105..68e5c04ebc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,21 @@
 # Changelog
 
+## 1.93.1 (2025-07-04)
+
+Full Changelog: [v1.93.0...v1.93.1](https://github.com/openai/openai-python/compare/v1.93.0...v1.93.1)
+
+### Bug Fixes
+
+* **ci:** correct conditional ([de6a9ce](https://github.com/openai/openai-python/commit/de6a9ce078731d60b0bdc42a9322548c575f11a3))
+* **responses:** add missing arguments to parse ([05590ec](https://github.com/openai/openai-python/commit/05590ec2a96399afd05baf5a3ee1d9a744f09c40))
+
+
+### Chores
+
+* **ci:** change upload type ([cd4aa88](https://github.com/openai/openai-python/commit/cd4aa889c50581d861728c9606327992485f0d0d))
+* **ci:** only run for pushes and fork pull requests ([f89c7eb](https://github.com/openai/openai-python/commit/f89c7eb46c6f081254715d75543cbee3ffa83822))
+* **tests:** ensure parse method is in sync with create ([4f58e18](https://github.com/openai/openai-python/commit/4f58e187c12dc8b2c33e9cca284b0429e5cc4de5))
+
 ## 1.93.0 (2025-06-27)
 
 Full Changelog: [v1.92.3...v1.93.0](https://github.com/openai/openai-python/compare/v1.92.3...v1.93.0)
diff --git a/pyproject.toml b/pyproject.toml
index 0a3e3e1ca8..73efe65b2f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.93.0"
+version = "1.93.1"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh
index 75198de98f..cd522975fc 100755
--- a/scripts/utils/upload-artifact.sh
+++ b/scripts/utils/upload-artifact.sh
@@ -1,7 +1,9 @@
 #!/usr/bin/env bash
 set -exuo pipefail
 
-RESPONSE=$(curl -X POST "$URL" \
+FILENAME=$(basename dist/*.whl)
+
+RESPONSE=$(curl -X POST "$URL?filename=$FILENAME" \
   -H "Authorization: Bearer $AUTH" \
   -H "Content-Type: application/json")
 
@@ -12,13 +14,13 @@ if [[ "$SIGNED_URL" == "null" ]]; then
   exit 1
 fi
 
-UPLOAD_RESPONSE=$(tar -cz . | curl -v -X PUT \
-  -H "Content-Type: application/gzip" \
-  --data-binary @- "$SIGNED_URL" 2>&1)
+UPLOAD_RESPONSE=$(curl -v -X PUT \
+  -H "Content-Type: binary/octet-stream" \
+  --data-binary "@dist/$FILENAME" "$SIGNED_URL" 2>&1)
 
 if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then
   echo -e "\033[32mUploaded build to Stainless storage.\033[0m"
-  echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/openai-python/$SHA'\033[0m"
+  echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/openai-python/$SHA/$FILENAME'\033[0m"
 else
   echo -e "\033[31mFailed to upload artifact.\033[0m"
   exit 1
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 84c3a45a00..289693a91c 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.93.0"  # x-release-please-version
+__version__ = "1.93.1"  # x-release-please-version
diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py
index aaf2088f38..ce132bdb05 100644
--- a/src/openai/resources/responses/responses.py
+++ b/src/openai/resources/responses/responses.py
@@ -943,22 +943,27 @@ def stream(
     def parse(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: Union[str, ChatModel],
         text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
-        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -991,21 +996,26 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
             "/responses",
             body=maybe_transform(
                 {
-                    "input": input,
-                    "model": model,
+                    "background": background,
                     "include": include,
+                    "input": input,
                     "instructions": instructions,
                     "max_output_tokens": max_output_tokens,
+                    "max_tool_calls": max_tool_calls,
                     "metadata": metadata,
+                    "model": model,
                     "parallel_tool_calls": parallel_tool_calls,
                     "previous_response_id": previous_response_id,
+                    "prompt": prompt,
                     "reasoning": reasoning,
+                    "service_tier": service_tier,
                     "store": store,
                     "stream": stream,
                     "temperature": temperature,
                     "text": text,
                     "tool_choice": tool_choice,
                     "tools": tools,
+                    "top_logprobs": top_logprobs,
                     "top_p": top_p,
                     "truncation": truncation,
                     "user": user,
@@ -2202,22 +2212,27 @@ def stream(
     async def parse(
         self,
         *,
-        input: Union[str, ResponseInputParam],
-        model: Union[str, ChatModel],
         text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
-        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        background: Optional[bool] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
+        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -2250,21 +2265,26 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
             "/responses",
             body=maybe_transform(
                 {
-                    "input": input,
-                    "model": model,
+                    "background": background,
                     "include": include,
+                    "input": input,
                     "instructions": instructions,
                     "max_output_tokens": max_output_tokens,
+                    "max_tool_calls": max_tool_calls,
                     "metadata": metadata,
+                    "model": model,
                     "parallel_tool_calls": parallel_tool_calls,
                     "previous_response_id": previous_response_id,
+                    "prompt": prompt,
                     "reasoning": reasoning,
+                    "service_tier": service_tier,
                     "store": store,
                     "stream": stream,
                     "temperature": temperature,
                     "text": text,
                     "tool_choice": tool_choice,
                     "tools": tools,
+                    "top_logprobs": top_logprobs,
                     "top_p": top_p,
                     "truncation": truncation,
                     "user": user,
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index 9c76928c8c..158654ee70 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -9,6 +9,7 @@
 
 from openai import OpenAI, AsyncOpenAI
 from tests.utils import assert_matches_type
+from openai._utils import assert_signatures_in_sync
 from openai.types.responses import (
     Response,
 )
@@ -340,6 +341,17 @@ def test_path_params_cancel(self, client: OpenAI) -> None:
         )
 
 
+@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
+def test_parse_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
+    checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
+
+    assert_signatures_in_sync(
+        checking_client.responses.create,
+        checking_client.responses.parse,
+        exclude_params={"stream", "tools"},
+    )
+
+
 class TestAsyncResponses:
     parametrize = pytest.mark.parametrize(
         "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]