diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3c4a087..99ec9fb 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,12 +1,14 @@
 name: CI
 on:
   push:
-    branches-ignore:
-      - 'generated'
-      - 'codegen/**'
-      - 'integrated/**'
-      - 'stl-preview-head/**'
-      - 'stl-preview-base/**'
+    branches:
+      - '**'
+      - '!integrated/**'
+      - '!stl-preview-head/**'
+      - '!stl-preview-base/**'
+      - '!generated'
+      - '!codegen/**'
+      - 'codegen/stl/**'
   pull_request:
     branches-ignore:
       - 'stl-preview-head/**'
@@ -17,7 +19,7 @@ jobs:
     timeout-minutes: 10
     name: lint
     runs-on: ${{ github.repository == 'stainless-sdks/parallel-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
-    if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+    if: (github.event_name == 'push' || github.event.pull_request.head.repo.fork) && (github.event_name != 'push' || github.event.head_commit.message != 'codegen metadata')

     steps:
       - uses: actions/checkout@v6
@@ -36,7 +38,7 @@
         run: ./scripts/lint

   build:
-    if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
+    if: (github.event_name == 'push' || github.event.pull_request.head.repo.fork) && (github.event_name != 'push' || github.event.head_commit.message != 'codegen metadata')
     timeout-minutes: 10
     name: build
     permissions:
diff --git a/.gitignore b/.gitignore
index 95ceb18..3824f4c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 .prism.log
+.stdy.log
 _dev

 __pycache__
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 980ea05..2aca35a 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "0.4.2"
+  ".": "0.5.0"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index d30019d..6cfd973 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 22
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-970b780e86490322cc3c7e2b57f140ca6766a3d9f6e0d3402837ebaf7c2183fc.yml
-openapi_spec_hash: 34f784ce2dec796048e6780924bae08f
-config_hash: a398d153133d8884bed4e5256a0ae818
+configured_endpoints: 23
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/parallel-web%2Fparallel-sdk-44048676c9b07d49ed9dbee5fad53d145eddaea5ba682b6557681c5a7e04f8ed.yml
+openapi_spec_hash: e239787937742b1bc15e7f211fe3c518
+config_hash: fe820a5a10ee48e143c9e49a153b23b4
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a52aa9d..f946731 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,44 @@
 # Changelog

+## 0.5.0 (2026-04-12)
+
+Full Changelog: [v0.4.2...v0.5.0](https://github.com/parallel-web/parallel-sdk-python/compare/v0.4.2...v0.5.0)
+
+### Features
+
+* **api:** Add Search and Extract v1 and associated types ([ea487f3](https://github.com/parallel-web/parallel-sdk-python/commit/ea487f32b73aee955f662d1fa225841421ce1ba3))
+* **api:** manual - add AdvancedSearchSettings and AdvancedExtractSettings models ([5836a6f](https://github.com/parallel-web/parallel-sdk-python/commit/5836a6fbe8db3a3509556442067f087918edb2bb))
+* **api:** Remove full_content from OpenAPI Spec ([7a4d651](https://github.com/parallel-web/parallel-sdk-python/commit/7a4d651c3e9f35334175f82daaf6392e9f76dee5))
+* **api:** Update OpenAPI spec ([fae95f4](https://github.com/parallel-web/parallel-sdk-python/commit/fae95f4f2c7cb60ebc0babc4fe540617e3334b2d))
+* **internal:** implement indices array format for query and form serialization ([3df5972](https://github.com/parallel-web/parallel-sdk-python/commit/3df5972e34c9aa1709eabc4eb5b8cbbc0adccae2))
+
+
+### Bug Fixes
+
+* **client:** preserve hardcoded query params when merging with user params ([08080bc](https://github.com/parallel-web/parallel-sdk-python/commit/08080bc22c415881cc9f9b05bc22f09ab83c7e8d))
+* **deps:** bump minimum typing-extensions version ([964a46d](https://github.com/parallel-web/parallel-sdk-python/commit/964a46ddfc9ead64e4105e42192a780bc91716b0))
+* ensure file data are only sent as 1 parameter ([1c15cc0](https://github.com/parallel-web/parallel-sdk-python/commit/1c15cc00b1ae223db8e51893ce23b9a2193e3ae7))
+* **pydantic:** do not pass `by_alias` unless set ([f0793c1](https://github.com/parallel-web/parallel-sdk-python/commit/f0793c171465dd57d0fbf82a3bb2281d046f500e))
+* sanitize endpoint path params ([5931597](https://github.com/parallel-web/parallel-sdk-python/commit/59315972d27246485be5cb52671aecaa3aa46253))
+
+
+### Chores
+
+* **ci:** skip lint on metadata-only changes ([403448c](https://github.com/parallel-web/parallel-sdk-python/commit/403448c7760e70fe0f4b3998a20f048910e91cd6))
+* **internal:** tweak CI branches ([014c802](https://github.com/parallel-web/parallel-sdk-python/commit/014c80287318df0db6207df1579be00c4717f24d))
+* **internal:** update gitignore ([1f4f6b0](https://github.com/parallel-web/parallel-sdk-python/commit/1f4f6b0e5d2a46e1a5457879e937ea5aa551073c))
+* **tests:** bump steady to v0.19.4 ([ebee2e7](https://github.com/parallel-web/parallel-sdk-python/commit/ebee2e761e2a8587cc6aa4c2decfd6310092b039))
+* **tests:** bump steady to v0.19.5 ([2774099](https://github.com/parallel-web/parallel-sdk-python/commit/2774099f753bc0826e9c6b6e9fbb40d4e72e3405))
+* **tests:** bump steady to v0.19.6 ([8e3ee3d](https://github.com/parallel-web/parallel-sdk-python/commit/8e3ee3d04dc149b2bcedb0e4acd92474fafd8d05))
+* **tests:** bump steady to v0.19.7 ([4bcf12e](https://github.com/parallel-web/parallel-sdk-python/commit/4bcf12e670b7997e23ade3d991711fe1ef741e35))
+* **tests:** bump steady to v0.20.1 ([d82ce60](https://github.com/parallel-web/parallel-sdk-python/commit/d82ce601c5687553b0e96990e418289fd8a14e00))
+* **tests:** bump steady to v0.20.2 ([746ca39](https://github.com/parallel-web/parallel-sdk-python/commit/746ca39c749f899dc9137a2f9be5de9aa39210c6))
+
+
+### Refactors
+
+* **tests:** switch from prism to steady ([032745e](https://github.com/parallel-web/parallel-sdk-python/commit/032745ea1a03b3d2516b789a28a3c8b8034660d8))
+
 ## 0.4.2 (2026-03-09)

 Full Changelog: [v0.4.1...v0.4.2](https://github.com/parallel-web/parallel-sdk-python/compare/v0.4.1...v0.4.2)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3276e79..1ecb266 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -85,7 +85,7 @@ $ pip install ./path-to-wheel-file.whl
 ```

 ## Running tests

-Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests.
+Most tests require you to [set up a mock server](https://github.com/dgellow/steady) against the OpenAPI spec to run the tests.

 ```sh
 $ ./scripts/mock
diff --git a/api.md b/api.md
index 66e3651..6462851 100644
--- a/api.md
+++ b/api.md
@@ -4,6 +4,30 @@
 from parallel.types import ErrorObject, ErrorResponse, SourcePolicy, Warning
 ```

+# Parallel
+
+Types:
+
+```python
+from parallel.types import (
+    AdvancedExtractSettings,
+    AdvancedSearchSettings,
+    ExcerptSettings,
+    ExtractError,
+    ExtractResponse,
+    ExtractResult,
+    FetchPolicy,
+    SearchResult,
+    UsageItem,
+    WebSearchResult,
+)
+```
+
+Methods:
+
+- client.extract(\*\*params) -> ExtractResponse
+- client.search(\*\*params) -> SearchResult
+
 # TaskRun

 Types:
diff --git a/pyproject.toml b/pyproject.toml
index 12646c4..b55996c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "parallel-web"
-version = "0.4.2"
+version = "0.5.0"
 description = "The official Python library for the Parallel API"
 dynamic = ["readme"]
 license = "MIT"
@@ -11,7 +11,7 @@ authors = [
 dependencies = [
     "httpx>=0.23.0, <1",
     "pydantic>=1.9.0, <3",
-    "typing-extensions>=4.10, <5",
+    "typing-extensions>=4.14, <5",
     "anyio>=3.5.0, <5",
     "distro>=1.7.0, <2",
     "sniffio",
diff --git a/scripts/mock b/scripts/mock
index bcf3b39..5cd7c15 100755
--- a/scripts/mock
+++ b/scripts/mock
@@ -19,34 +19,34 @@ fi

 echo "==> Starting mock server with URL ${URL}"

-# Run prism mock on the given spec
+# Run steady mock on the given spec
 if [ "$1" == "--daemon" ]; then
   # Pre-install the package so the download doesn't eat into the startup timeout
-  npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism --version
+  npm exec --package=@stdy/cli@0.20.2 -- steady --version

-  npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log &
+  npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL" &> .stdy.log &

-  # Wait for server to come online (max 30s)
+  # Wait for server to come online via health endpoint (max 30s)
   echo -n "Waiting for server"
   attempts=0
-  while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do
+  while ! curl --silent --fail "http://127.0.0.1:4010/_x-steady/health" >/dev/null 2>&1; do
+    if ! kill -0 $! 2>/dev/null; then
+      echo
+      cat .stdy.log
+      exit 1
+    fi
     attempts=$((attempts + 1))
     if [ "$attempts" -ge 300 ]; then
       echo
-      echo "Timed out waiting for Prism server to start"
-      cat .prism.log
+      echo "Timed out waiting for Steady server to start"
+      cat .stdy.log
       exit 1
     fi
     echo -n "."
     sleep 0.1
   done
-  if grep -q "✖ fatal" ".prism.log"; then
-    cat .prism.log
-    exit 1
-  fi
-
   echo
 else
-  npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL"
+  npm exec --package=@stdy/cli@0.20.2 -- steady --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets "$URL"
 fi
diff --git a/scripts/test b/scripts/test
index dbeda2d..b8143aa 100755
--- a/scripts/test
+++ b/scripts/test
@@ -9,8 +9,8 @@ GREEN='\033[0;32m'
 YELLOW='\033[0;33m'
 NC='\033[0m' # No Color

-function prism_is_running() {
-  curl --silent "http://localhost:4010" >/dev/null 2>&1
+function steady_is_running() {
+  curl --silent "http://127.0.0.1:4010/_x-steady/health" >/dev/null 2>&1
 }

 kill_server_on_port() {
@@ -25,7 +25,7 @@ function is_overriding_api_base_url() {
   [ -n "$TEST_API_BASE_URL" ]
 }

-if ! is_overriding_api_base_url && ! prism_is_running ; then
+if ! is_overriding_api_base_url && !
steady_is_running ; then # When we exit this script, make sure to kill the background mock server process trap 'kill_server_on_port 4010' EXIT @@ -36,19 +36,19 @@ fi if is_overriding_api_base_url ; then echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}" echo -elif ! prism_is_running ; then - echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server" +elif ! steady_is_running ; then + echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Steady server" echo -e "running against your OpenAPI spec." echo echo -e "To run the server, pass in the path or url of your OpenAPI" - echo -e "spec to the prism command:" + echo -e "spec to the steady command:" echo - echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}" + echo -e " \$ ${YELLOW}npm exec --package=@stdy/cli@0.20.2 -- steady path/to/your.openapi.yml --host 127.0.0.1 -p 4010 --validator-query-array-format=comma --validator-form-array-format=comma --validator-query-object-format=brackets --validator-form-object-format=brackets${NC}" echo exit 1 else - echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}" + echo -e "${GREEN}✔ Mock steady server is running with your OpenAPI spec${NC}" echo fi diff --git a/src/parallel/_base_client.py b/src/parallel/_base_client.py index 5128667..b283b92 100644 --- a/src/parallel/_base_client.py +++ b/src/parallel/_base_client.py @@ -540,6 +540,10 @@ def _build_request( files = cast(HttpxRequestFiles, ForceMultipartDict()) prepared_url = self._prepare_url(options.url) + # preserve hard-coded query params from the url + if params and prepared_url.query: + params = {**dict(prepared_url.params.items()), **params} + prepared_url = prepared_url.copy_with(raw_path=prepared_url.raw_path.split(b"?", 1)[0]) if "_" in prepared_url.host: # work around https://github.com/encode/httpx/discussions/2880 kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")} diff --git a/src/parallel/_client.py b/src/parallel/_client.py index cf3f898..7a007e9 100644 --- a/src/parallel/_client.py +++ b/src/parallel/_client.py @@ -3,32 +3,54 @@ from __future__ import annotations import os -from typing import TYPE_CHECKING, Any, Mapping -from typing_extensions import Self, override +from typing import TYPE_CHECKING, Any, Mapping, Optional +from typing_extensions import Self, Literal, override import httpx from . 
import _exceptions from ._qs import Querystring +from .types import client_search_params, client_extract_params from ._types import ( + Body, Omit, + Query, + Headers, Timeout, NotGiven, Transport, ProxiesTypes, RequestOptions, + SequenceNotStr, + omit, not_given, ) -from ._utils import is_given, get_async_library +from ._utils import ( + is_given, + maybe_transform, + get_async_library, + async_maybe_transform, +) from ._compat import cached_property from ._version import __version__ +from ._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) from ._streaming import Stream as Stream, AsyncStream as AsyncStream from ._exceptions import ParallelError, APIStatusError from ._base_client import ( DEFAULT_MAX_RETRIES, SyncAPIClient, AsyncAPIClient, + make_request_options, ) +from .types.search_result import SearchResult +from .types.extract_response import ExtractResponse +from .types.advanced_search_settings_param import AdvancedSearchSettingsParam +from .types.advanced_extract_settings_param import AdvancedExtractSettingsParam if TYPE_CHECKING: from .resources import beta, task_run @@ -108,6 +130,13 @@ def task_run(self) -> TaskRunResource: Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. + - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ from .resources.task_run import TaskRunResource @@ -198,6 +227,139 @@ def copy( # client.with_options(timeout=10).foo.create(...) with_options = copy + def extract( + self, + *, + urls: SequenceNotStr[str], + advanced: Optional[AdvancedExtractSettingsParam] | Omit = omit, + client_model: Optional[str] | Omit = omit, + max_chars_total: Optional[int] | Omit = omit, + objective: Optional[str] | Omit = omit, + search_queries: Optional[SequenceNotStr[str]] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ExtractResponse: + """ + Extracts relevant content from specific web URLs. + + Args: + urls: URLs to extract content from. Up to 20 URLs. + + advanced: Advanced extract configuration. + + client_model: The model generating this request and consuming the results. Enables + optimizations and tailors default settings for the model's capabilities. + + max_chars_total: Upper bound on total characters across excerpts from all extracted results. Does + not affect full_content if requested. Default is dynamic based on urls, + objective, and client_model. 
+ + objective: As in SearchRequest, a natural-language description of the underlying question + or goal driving the request. Used together with search_queries to focus excerpts + on the most relevant content. + + search_queries: Optional keyword search queries, as in SearchRequest. Used together with + objective to focus excerpts on the most relevant content. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self.post( + "/v1/extract", + body=maybe_transform( + { + "urls": urls, + "advanced": advanced, + "client_model": client_model, + "max_chars_total": max_chars_total, + "objective": objective, + "search_queries": search_queries, + }, + client_extract_params.ClientExtractParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ExtractResponse, + ) + + def search( + self, + *, + search_queries: SequenceNotStr[str], + advanced: Optional[AdvancedSearchSettingsParam] | Omit = omit, + client_model: Optional[str] | Omit = omit, + max_chars_total: Optional[int] | Omit = omit, + mode: Optional[Literal["basic", "standard"]] | Omit = omit, + objective: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> SearchResult: + """ + Searches the web. + + Args: + search_queries: Concise keyword search queries, 3-6 words each. At least one query is required, + provide 2-3 for best results. Used together with objective to focus results on + the most relevant content. + + advanced: Advanced search configuration. + + client_model: The model generating this request and consuming the results. Enables + optimizations and tailors default settings for the model's capabilities. + + max_chars_total: Upper bound on total characters across excerpts from all results. Default is + dynamic based on search_queries, objective, and client_model. + + mode: Search mode preset: supported values are basic and standard. Basic mode offers + the lowest latency and works best with 2-3 high-quality search_queries. Standard + mode provides higher quality with more advanced retrieval and compression. + + objective: Natural-language description of the underlying question or goal driving the + search. Used together with search_queries to focus results on the most relevant + content. Should be self-contained with enough context to understand the intent + of the search. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self.post( + "/v1/search", + body=maybe_transform( + { + "search_queries": search_queries, + "advanced": advanced, + "client_model": client_model, + "max_chars_total": max_chars_total, + "mode": mode, + "objective": objective, + }, + client_search_params.ClientSearchParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=SearchResult, + ) + @override def _make_status_error( self, @@ -293,6 +455,13 @@ def task_run(self) -> AsyncTaskRunResource: Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. + - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ from .resources.task_run import AsyncTaskRunResource @@ -383,6 +552,139 @@ def copy( # client.with_options(timeout=10).foo.create(...) with_options = copy + async def extract( + self, + *, + urls: SequenceNotStr[str], + advanced: Optional[AdvancedExtractSettingsParam] | Omit = omit, + client_model: Optional[str] | Omit = omit, + max_chars_total: Optional[int] | Omit = omit, + objective: Optional[str] | Omit = omit, + search_queries: Optional[SequenceNotStr[str]] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> ExtractResponse: + """ + Extracts relevant content from specific web URLs. + + Args: + urls: URLs to extract content from. Up to 20 URLs. + + advanced: Advanced extract configuration. + + client_model: The model generating this request and consuming the results. Enables + optimizations and tailors default settings for the model's capabilities. + + max_chars_total: Upper bound on total characters across excerpts from all extracted results. Does + not affect full_content if requested. Default is dynamic based on urls, + objective, and client_model. + + objective: As in SearchRequest, a natural-language description of the underlying question + or goal driving the request. Used together with search_queries to focus excerpts + on the most relevant content. + + search_queries: Optional keyword search queries, as in SearchRequest. Used together with + objective to focus excerpts on the most relevant content. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self.post( + "/v1/extract", + body=await async_maybe_transform( + { + "urls": urls, + "advanced": advanced, + "client_model": client_model, + "max_chars_total": max_chars_total, + "objective": objective, + "search_queries": search_queries, + }, + client_extract_params.ClientExtractParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ExtractResponse, + ) + + async def search( + self, + *, + search_queries: SequenceNotStr[str], + advanced: Optional[AdvancedSearchSettingsParam] | Omit = omit, + client_model: Optional[str] | Omit = omit, + max_chars_total: Optional[int] | Omit = omit, + mode: Optional[Literal["basic", "standard"]] | Omit = omit, + objective: Optional[str] | Omit = omit, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = not_given, + ) -> SearchResult: + """ + Searches the web. + + Args: + search_queries: Concise keyword search queries, 3-6 words each. At least one query is required, + provide 2-3 for best results. Used together with objective to focus results on + the most relevant content. + + advanced: Advanced search configuration. + + client_model: The model generating this request and consuming the results. Enables + optimizations and tailors default settings for the model's capabilities. + + max_chars_total: Upper bound on total characters across excerpts from all results. Default is + dynamic based on search_queries, objective, and client_model. + + mode: Search mode preset: supported values are basic and standard. Basic mode offers + the lowest latency and works best with 2-3 high-quality search_queries. Standard + mode provides higher quality with more advanced retrieval and compression. + + objective: Natural-language description of the underlying question or goal driving the + search. Used together with search_queries to focus results on the most relevant + content. Should be self-contained with enough context to understand the intent + of the search. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self.post( + "/v1/search", + body=await async_maybe_transform( + { + "search_queries": search_queries, + "advanced": advanced, + "client_model": client_model, + "max_chars_total": max_chars_total, + "mode": mode, + "objective": objective, + }, + client_search_params.ClientSearchParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=SearchResult, + ) + @override def _make_status_error( self, @@ -423,12 +725,26 @@ class ParallelWithRawResponse: def __init__(self, client: Parallel) -> None: self._client = client + self.extract = to_raw_response_wrapper( + client.extract, + ) + self.search = to_raw_response_wrapper( + client.search, + ) + @cached_property def task_run(self) -> task_run.TaskRunResourceWithRawResponse: """The Task API executes web research and extraction tasks. Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. + - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ from .resources.task_run import TaskRunResourceWithRawResponse @@ -447,12 +763,26 @@ class AsyncParallelWithRawResponse: def __init__(self, client: AsyncParallel) -> None: self._client = client + self.extract = async_to_raw_response_wrapper( + client.extract, + ) + self.search = async_to_raw_response_wrapper( + client.search, + ) + @cached_property def task_run(self) -> task_run.AsyncTaskRunResourceWithRawResponse: """The Task API executes web research and extraction tasks. Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. 
+ - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ from .resources.task_run import AsyncTaskRunResourceWithRawResponse @@ -471,12 +801,26 @@ class ParallelWithStreamedResponse: def __init__(self, client: Parallel) -> None: self._client = client + self.extract = to_streamed_response_wrapper( + client.extract, + ) + self.search = to_streamed_response_wrapper( + client.search, + ) + @cached_property def task_run(self) -> task_run.TaskRunResourceWithStreamingResponse: """The Task API executes web research and extraction tasks. Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. + - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ from .resources.task_run import TaskRunResourceWithStreamingResponse @@ -495,12 +839,26 @@ class AsyncParallelWithStreamedResponse: def __init__(self, client: AsyncParallel) -> None: self._client = client + self.extract = async_to_streamed_response_wrapper( + client.extract, + ) + self.search = async_to_streamed_response_wrapper( + client.search, + ) + @cached_property def task_run(self) -> task_run.AsyncTaskRunResourceWithStreamingResponse: """The Task API executes web research and extraction tasks. Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. 
+ - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ from .resources.task_run import AsyncTaskRunResourceWithStreamingResponse diff --git a/src/parallel/_compat.py b/src/parallel/_compat.py index 020ffeb..340c91a 100644 --- a/src/parallel/_compat.py +++ b/src/parallel/_compat.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload from datetime import date, datetime -from typing_extensions import Self, Literal +from typing_extensions import Self, Literal, TypedDict import pydantic from pydantic.fields import FieldInfo @@ -131,6 +131,10 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str: return model.model_dump_json(indent=indent) +class _ModelDumpKwargs(TypedDict, total=False): + by_alias: bool + + def model_dump( model: pydantic.BaseModel, *, @@ -142,6 +146,9 @@ def model_dump( by_alias: bool | None = None, ) -> dict[str, Any]: if (not PYDANTIC_V1) or hasattr(model, "model_dump"): + kwargs: _ModelDumpKwargs = {} + if by_alias is not None: + kwargs["by_alias"] = by_alias return model.model_dump( mode=mode, exclude=exclude, @@ -149,7 +156,7 @@ def model_dump( exclude_defaults=exclude_defaults, # warnings are not supported in Pydantic v1 warnings=True if PYDANTIC_V1 else warnings, - by_alias=by_alias, + **kwargs, ) return cast( "dict[str, Any]", diff --git a/src/parallel/_qs.py b/src/parallel/_qs.py index ada6fd3..de8c99b 100644 --- a/src/parallel/_qs.py +++ b/src/parallel/_qs.py @@ -101,7 +101,10 @@ def _stringify_item( items.extend(self._stringify_item(key, item, opts)) return items elif array_format == "indices": - raise NotImplementedError("The array indices format is not supported yet") + items = [] + for i, item in enumerate(value): + items.extend(self._stringify_item(f"{key}[{i}]", item, opts)) + return items elif array_format == "brackets": items = [] key = key + "[]" diff --git a/src/parallel/_utils/__init__.py b/src/parallel/_utils/__init__.py index b70d7b2..f1aef8a 100644 --- a/src/parallel/_utils/__init__.py +++ b/src/parallel/_utils/__init__.py @@ -1,3 +1,4 @@ +from ._path import path_template as path_template from ._sync import asyncify as asyncify from ._proxy import LazyProxy as LazyProxy from ._utils import ( diff --git a/src/parallel/_utils/_path.py b/src/parallel/_utils/_path.py new file mode 100644 index 0000000..4d6e1e4 --- /dev/null +++ b/src/parallel/_utils/_path.py @@ -0,0 +1,127 @@ +from __future__ import annotations + +import re +from typing import ( + Any, + Mapping, + Callable, +) +from urllib.parse import quote + +# Matches '.' or '..' where each dot is either literal or percent-encoded (%2e / %2E). +_DOT_SEGMENT_RE = re.compile(r"^(?:\.|%2[eE]){1,2}$") + +_PLACEHOLDER_RE = re.compile(r"\{(\w+)\}") + + +def _quote_path_segment_part(value: str) -> str: + """Percent-encode `value` for use in a URI path segment. + + Considers characters not in `pchar` set from RFC 3986 §3.3 to be unsafe. + https://datatracker.ietf.org/doc/html/rfc3986#section-3.3 + """ + # quote() already treats unreserved characters (letters, digits, and -._~) + # as safe, so we only need to add sub-delims, ':', and '@'. + # Notably, unlike the default `safe` for quote(), / is unsafe and must be quoted. 
+    return quote(value, safe="!$&'()*+,;=:@")
+
+
+def _quote_query_part(value: str) -> str:
+    """Percent-encode `value` for use in a URI query string.
+
+    Considers &, = and characters not in `query` set from RFC 3986 §3.4 to be unsafe.
+    https://datatracker.ietf.org/doc/html/rfc3986#section-3.4
+    """
+    return quote(value, safe="!$'()*+,;:@/?")
+
+
+def _quote_fragment_part(value: str) -> str:
+    """Percent-encode `value` for use in a URI fragment.
+
+    Considers characters not in `fragment` set from RFC 3986 §3.5 to be unsafe.
+    https://datatracker.ietf.org/doc/html/rfc3986#section-3.5
+    """
+    return quote(value, safe="!$&'()*+,;=:@/?")
+
+
+def _interpolate(
+    template: str,
+    values: Mapping[str, Any],
+    quoter: Callable[[str], str],
+) -> str:
+    """Replace {name} placeholders in `template`, quoting each value with `quoter`.
+
+    Placeholder names are looked up in `values`.
+
+    Raises:
+        KeyError: If a placeholder is not found in `values`.
+    """
+    # re.split with a capturing group returns alternating
+    # [text, name, text, name, ..., text] elements.
+    parts = _PLACEHOLDER_RE.split(template)
+
+    for i in range(1, len(parts), 2):
+        name = parts[i]
+        if name not in values:
+            raise KeyError(f"a value for placeholder {{{name}}} was not provided")
+        val = values[name]
+        if val is None:
+            parts[i] = "null"
+        elif isinstance(val, bool):
+            parts[i] = "true" if val else "false"
+        else:
+            parts[i] = quoter(str(values[name]))
+
+    return "".join(parts)
+
+
+def path_template(template: str, /, **kwargs: Any) -> str:
+    """Interpolate {name} placeholders in `template` from keyword arguments.
+
+    Args:
+        template: The template string containing {name} placeholders.
+        **kwargs: Keyword arguments to interpolate into the template.
+
+    Returns:
+        The template with placeholders interpolated and percent-encoded.
+
+    Safe characters for percent-encoding are dependent on the URI component.
+    Placeholders in path and fragment portions are percent-encoded where the `segment`
+    and `fragment` sets from RFC 3986 respectively are considered safe.
+    Placeholders in the query portion are percent-encoded where the `query` set from
+    RFC 3986 §3.4 is considered safe except for = and & characters.
+
+    Raises:
+        KeyError: If a placeholder is not found in `kwargs`.
+        ValueError: If the resulting path contains /./ or /../ segments (including percent-encoded dot-segments).
+    """
+    # Split the template into path, query, and fragment portions.
+    fragment_template: str | None = None
+    query_template: str | None = None
+
+    rest = template
+    if "#" in rest:
+        rest, fragment_template = rest.split("#", 1)
+    if "?" in rest:
+        rest, query_template = rest.split("?", 1)
+    path_template = rest
+
+    # Interpolate each portion with the appropriate quoting rules.
+    path_result = _interpolate(path_template, kwargs, _quote_path_segment_part)
+
+    # Reject dot-segments (. and ..) in the final assembled path. The check
+    # runs after interpolation so that adjacent placeholders or a mix of static
+    # text and placeholders that together form a dot-segment are caught.
+    # Also reject percent-encoded dot-segments to protect against incorrectly
+    # implemented normalization in servers/proxies.
+    for segment in path_result.split("/"):
+        if _DOT_SEGMENT_RE.match(segment):
+            raise ValueError(f"Constructed path {path_result!r} contains dot-segment {segment!r} which is not allowed")
+
+    result = path_result
+    if query_template is not None:
+        result += "?" + _interpolate(query_template, kwargs, _quote_query_part)
+    if fragment_template is not None:
+        result += "#" + _interpolate(fragment_template, kwargs, _quote_fragment_part)
+
+    return result
diff --git a/src/parallel/_utils/_utils.py b/src/parallel/_utils/_utils.py
index ece4d87..569c62b 100644
--- a/src/parallel/_utils/_utils.py
+++ b/src/parallel/_utils/_utils.py
@@ -86,8 +86,9 @@ def _extract_items(
     index += 1
     if is_dict(obj):
         try:
-            # We are at the last entry in the path so we must remove the field
-            if (len(path)) == index:
+            # Remove the field if there are no more dict keys in the path,
+            # only "" traversal markers or end.
+            if all(p == "" for p in path[index:]):
                 item = obj.pop(key)
             else:
                 item = obj[key]
diff --git a/src/parallel/_version.py b/src/parallel/_version.py
index 1b80dfe..e3da38f 100644
--- a/src/parallel/_version.py
+++ b/src/parallel/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

 __title__ = "parallel"
-__version__ = "0.4.2"  # x-release-please-version
+__version__ = "0.5.0"  # x-release-please-version
diff --git a/src/parallel/resources/beta/api.md b/src/parallel/resources/beta/api.md
index 0b56fe7..8652cf3 100644
--- a/src/parallel/resources/beta/api.md
+++ b/src/parallel/resources/beta/api.md
@@ -5,13 +5,13 @@ Types:

 ```python
 from parallel.types.beta import (
     ExcerptSettings,
-    ExtractError,
     ExtractResponse,
     ExtractResult,
-    FetchPolicy,
     SearchResult,
-    UsageItem,
     WebSearchResult,
+    ExtractError,
+    FetchPolicy,
+    UsageItem,
 )
 ```
diff --git a/src/parallel/resources/beta/beta.py b/src/parallel/resources/beta/beta.py
index c2f4368..d32d2a6 100644
--- a/src/parallel/resources/beta/beta.py
+++ b/src/parallel/resources/beta/beta.py
@@ -45,8 +45,8 @@
 from ...types.beta import beta_search_params, beta_extract_params
 from ..._base_client import make_request_options
 from ...types.beta.search_result import SearchResult
+from ...types.fetch_policy_param import FetchPolicyParam
 from ...types.beta.extract_response import ExtractResponse
-from ...types.beta.fetch_policy_param import FetchPolicyParam
 from ...types.beta.parallel_beta_param import ParallelBetaParam
 from ...types.beta.excerpt_settings_param import ExcerptSettingsParam
 from ...types.shared_params.source_policy import SourcePolicy
@@ -61,19 +61,29 @@ def task_run(self) -> TaskRunResource:
         Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences.
         - Output metadata: citations, excerpts, reasoning, and confidence per field
+
+        Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling.
+        - Submit hundreds or thousands of Tasks as a single group
+        - Observe group progress and receive results as they complete
+        - Real-time updates via Server-Sent Events (SSE)
+        - Add tasks to an existing group while it is running
+        - Group-level retry and error aggregation
         """
         return TaskRunResource(self._client)

     @cached_property
     def task_group(self) -> TaskGroupResource:
-        """
-        The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling.
+        """The Task API executes web research and extraction tasks.
+ + Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. + - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. - Submit hundreds or thousands of Tasks as a single group - Observe group progress and receive results as they complete - Real-time updates via Server-Sent Events (SSE) - Add tasks to an existing group while it is running - Group-level retry and error aggregation - Status: beta and subject to change. """ return TaskGroupResource(self._client) @@ -185,6 +195,7 @@ def search( *, excerpts: ExcerptSettingsParam | Omit = omit, fetch_policy: Optional[FetchPolicyParam] | Omit = omit, + location: Optional[str] | Omit = omit, max_chars_per_result: Optional[int] | Omit = omit, max_results: Optional[int] | Omit = omit, mode: Optional[Literal["one-shot", "agentic", "fast"]] | Omit = omit, @@ -208,6 +219,8 @@ def search( fetch_policy: Policy for live fetching web results. + location: ISO 3166-1 alpha-2 country code for geo-targeted search results. + max_chars_per_result: DEPRECATED: Use `excerpts.max_chars_per_result` instead. max_results: Upper bound on the number of results to return. Defaults to 10 if not provided. @@ -262,6 +275,7 @@ def search( { "excerpts": excerpts, "fetch_policy": fetch_policy, + "location": location, "max_chars_per_result": max_chars_per_result, "max_results": max_results, "mode": mode, @@ -286,19 +300,29 @@ def task_run(self) -> AsyncTaskRunResource: Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. + - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ return AsyncTaskRunResource(self._client) @cached_property def task_group(self) -> AsyncTaskGroupResource: - """ - The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling. + """The Task API executes web research and extraction tasks. + + Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. 
+ - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. - Submit hundreds or thousands of Tasks as a single group - Observe group progress and receive results as they complete - Real-time updates via Server-Sent Events (SSE) - Add tasks to an existing group while it is running - Group-level retry and error aggregation - Status: beta and subject to change. """ return AsyncTaskGroupResource(self._client) @@ -410,6 +434,7 @@ async def search( *, excerpts: ExcerptSettingsParam | Omit = omit, fetch_policy: Optional[FetchPolicyParam] | Omit = omit, + location: Optional[str] | Omit = omit, max_chars_per_result: Optional[int] | Omit = omit, max_results: Optional[int] | Omit = omit, mode: Optional[Literal["one-shot", "agentic", "fast"]] | Omit = omit, @@ -433,6 +458,8 @@ async def search( fetch_policy: Policy for live fetching web results. + location: ISO 3166-1 alpha-2 country code for geo-targeted search results. + max_chars_per_result: DEPRECATED: Use `excerpts.max_chars_per_result` instead. max_results: Upper bound on the number of results to return. Defaults to 10 if not provided. @@ -487,6 +514,7 @@ async def search( { "excerpts": excerpts, "fetch_policy": fetch_policy, + "location": location, "max_chars_per_result": max_chars_per_result, "max_results": max_results, "mode": mode, @@ -521,19 +549,29 @@ def task_run(self) -> TaskRunResourceWithRawResponse: Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. + - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ return TaskRunResourceWithRawResponse(self._beta.task_run) @cached_property def task_group(self) -> TaskGroupResourceWithRawResponse: - """ - The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling. + """The Task API executes web research and extraction tasks. + + Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. + - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. 
- Submit hundreds or thousands of Tasks as a single group - Observe group progress and receive results as they complete - Real-time updates via Server-Sent Events (SSE) - Add tasks to an existing group while it is running - Group-level retry and error aggregation - Status: beta and subject to change. """ return TaskGroupResourceWithRawResponse(self._beta.task_group) @@ -562,19 +600,29 @@ def task_run(self) -> AsyncTaskRunResourceWithRawResponse: Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. + - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ return AsyncTaskRunResourceWithRawResponse(self._beta.task_run) @cached_property def task_group(self) -> AsyncTaskGroupResourceWithRawResponse: - """ - The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling. + """The Task API executes web research and extraction tasks. + + Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. + - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. - Submit hundreds or thousands of Tasks as a single group - Observe group progress and receive results as they complete - Real-time updates via Server-Sent Events (SSE) - Add tasks to an existing group while it is running - Group-level retry and error aggregation - Status: beta and subject to change. """ return AsyncTaskGroupResourceWithRawResponse(self._beta.task_group) @@ -603,19 +651,29 @@ def task_run(self) -> TaskRunResourceWithStreamingResponse: Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. 
+ - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ return TaskRunResourceWithStreamingResponse(self._beta.task_run) @cached_property def task_group(self) -> TaskGroupResourceWithStreamingResponse: - """ - The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling. + """The Task API executes web research and extraction tasks. + + Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. + - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. - Submit hundreds or thousands of Tasks as a single group - Observe group progress and receive results as they complete - Real-time updates via Server-Sent Events (SSE) - Add tasks to an existing group while it is running - Group-level retry and error aggregation - Status: beta and subject to change. """ return TaskGroupResourceWithStreamingResponse(self._beta.task_group) @@ -644,19 +702,29 @@ def task_run(self) -> AsyncTaskRunResourceWithStreamingResponse: Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. + - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ return AsyncTaskRunResourceWithStreamingResponse(self._beta.task_run) @cached_property def task_group(self) -> AsyncTaskGroupResourceWithStreamingResponse: - """ - The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling. + """The Task API executes web research and extraction tasks. + + Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. + - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. 
- Submit hundreds or thousands of Tasks as a single group - Observe group progress and receive results as they complete - Real-time updates via Server-Sent Events (SSE) - Add tasks to an existing group while it is running - Group-level retry and error aggregation - Status: beta and subject to change. """ return AsyncTaskGroupResourceWithStreamingResponse(self._beta.task_group) diff --git a/src/parallel/resources/beta/findall.py b/src/parallel/resources/beta/findall.py index ef5ab71..acc3d98 100644 --- a/src/parallel/resources/beta/findall.py +++ b/src/parallel/resources/beta/findall.py @@ -9,7 +9,7 @@ import httpx from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given -from ..._utils import is_given, maybe_transform, strip_not_given, async_maybe_transform +from ..._utils import is_given, path_template, maybe_transform, strip_not_given, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( @@ -209,7 +209,7 @@ def retrieve( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return self._get( - f"/v1beta/findall/runs/{findall_id}", + path_template("/v1beta/findall/runs/{findall_id}", findall_id=findall_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -256,7 +256,7 @@ def cancel( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return self._post( - f"/v1beta/findall/runs/{findall_id}/cancel", + path_template("/v1beta/findall/runs/{findall_id}/cancel", findall_id=findall_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -312,7 +312,7 @@ def enrich( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return self._post( - f"/v1beta/findall/runs/{findall_id}/enrich", + path_template("/v1beta/findall/runs/{findall_id}/enrich", findall_id=findall_id), body=maybe_transform( { "output_schema": output_schema, @@ -375,7 +375,7 @@ def events( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return self._get( - f"/v1beta/findall/runs/{findall_id}/events", + path_template("/v1beta/findall/runs/{findall_id}/events", findall_id=findall_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -439,7 +439,7 @@ def extend( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return self._post( - f"/v1beta/findall/runs/{findall_id}/extend", + path_template("/v1beta/findall/runs/{findall_id}/extend", findall_id=findall_id), body=maybe_transform( {"additional_match_limit": additional_match_limit}, findall_extend_params.FindAllExtendParams ), @@ -542,7 +542,7 @@ def result( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return self._get( - f"/v1beta/findall/runs/{findall_id}/result", + path_template("/v1beta/findall/runs/{findall_id}/result", findall_id=findall_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -589,7 +589,7 @@ def schema( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return self._get( - f"/v1beta/findall/runs/{findall_id}/schema", + path_template("/v1beta/findall/runs/{findall_id}/schema", findall_id=findall_id), options=make_request_options( extra_headers=extra_headers, 
extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -754,7 +754,7 @@ async def retrieve( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return await self._get( - f"/v1beta/findall/runs/{findall_id}", + path_template("/v1beta/findall/runs/{findall_id}", findall_id=findall_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -801,7 +801,7 @@ async def cancel( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return await self._post( - f"/v1beta/findall/runs/{findall_id}/cancel", + path_template("/v1beta/findall/runs/{findall_id}/cancel", findall_id=findall_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -857,7 +857,7 @@ async def enrich( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return await self._post( - f"/v1beta/findall/runs/{findall_id}/enrich", + path_template("/v1beta/findall/runs/{findall_id}/enrich", findall_id=findall_id), body=await async_maybe_transform( { "output_schema": output_schema, @@ -920,7 +920,7 @@ async def events( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return await self._get( - f"/v1beta/findall/runs/{findall_id}/events", + path_template("/v1beta/findall/runs/{findall_id}/events", findall_id=findall_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -984,7 +984,7 @@ async def extend( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return await self._post( - f"/v1beta/findall/runs/{findall_id}/extend", + path_template("/v1beta/findall/runs/{findall_id}/extend", findall_id=findall_id), body=await async_maybe_transform( {"additional_match_limit": additional_match_limit}, findall_extend_params.FindAllExtendParams ), @@ -1087,7 +1087,7 @@ async def result( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return await self._get( - f"/v1beta/findall/runs/{findall_id}/result", + path_template("/v1beta/findall/runs/{findall_id}/result", findall_id=findall_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -1134,7 +1134,7 @@ async def schema( } extra_headers = {"parallel-beta": "findall-2025-09-15", **(extra_headers or {})} return await self._get( - f"/v1beta/findall/runs/{findall_id}/schema", + path_template("/v1beta/findall/runs/{findall_id}/schema", findall_id=findall_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/parallel/resources/beta/task_group.py b/src/parallel/resources/beta/task_group.py index b15eab7..5ccb046 100644 --- a/src/parallel/resources/beta/task_group.py +++ b/src/parallel/resources/beta/task_group.py @@ -9,7 +9,7 @@ import httpx from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given -from ..._utils import is_given, maybe_transform, strip_not_given, async_maybe_transform +from ..._utils import is_given, path_template, maybe_transform, strip_not_given, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( @@ -38,14 +38,17 @@ class TaskGroupResource(SyncAPIResource): - """ - The Task Group API is currently in beta and enables 
batch execution of many independent Task runs with group-level monitoring and failure handling. + """The Task API executes web research and extraction tasks. + + Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. + - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. - Submit hundreds or thousands of Tasks as a single group - Observe group progress and receive results as they complete - Real-time updates via Server-Sent Events (SSE) - Add tasks to an existing group while it is running - Group-level retry and error aggregation - Status: beta and subject to change. """ @cached_property @@ -129,7 +132,7 @@ def retrieve( raise ValueError(f"Expected a non-empty value for `task_group_id` but received {task_group_id!r}") extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})} return self._get( - f"/v1beta/tasks/groups/{task_group_id}", + path_template("/v1beta/tasks/groups/{task_group_id}", task_group_id=task_group_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -141,6 +144,7 @@ def add_runs( task_group_id: str, *, inputs: Iterable[BetaRunInputParam], + refresh_status: bool | Omit = omit, default_task_spec: Optional[TaskSpecParam] | Omit = omit, betas: List[ParallelBetaParam] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
@@ -188,7 +192,7 @@ def add_runs( } extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})} return self._post( - f"/v1beta/tasks/groups/{task_group_id}/runs", + path_template("/v1beta/tasks/groups/{task_group_id}/runs", task_group_id=task_group_id), body=maybe_transform( { "inputs": inputs, @@ -197,7 +201,13 @@ def add_runs( task_group_add_runs_params.TaskGroupAddRunsParams, ), options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + {"refresh_status": refresh_status}, task_group_add_runs_params.TaskGroupAddRunsParams + ), ), cast_to=TaskGroupRunResponse, ) @@ -235,7 +245,7 @@ def events( extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})} extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})} return self._get( - f"/v1beta/tasks/groups/{task_group_id}/events", + path_template("/v1beta/tasks/groups/{task_group_id}/events", task_group_id=task_group_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -300,7 +310,7 @@ def get_runs( extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})} extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})} return self._get( - f"/v1beta/tasks/groups/{task_group_id}/runs", + path_template("/v1beta/tasks/groups/{task_group_id}/runs", task_group_id=task_group_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -325,14 +335,17 @@ def get_runs( class AsyncTaskGroupResource(AsyncAPIResource): - """ - The Task Group API is currently in beta and enables batch execution of many independent Task runs with group-level monitoring and failure handling. + """The Task API executes web research and extraction tasks. + + Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. + - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. - Submit hundreds or thousands of Tasks as a single group - Observe group progress and receive results as they complete - Real-time updates via Server-Sent Events (SSE) - Add tasks to an existing group while it is running - Group-level retry and error aggregation - Status: beta and subject to change. 
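The `add_runs` change above adds a `refresh_status` keyword and forwards it as a query parameter via the new `query=maybe_transform(...)` block. A usage sketch, grounded only in that signature and the test below: the task-group ID and run inputs are placeholders, and the flag's exact server-side behaviour is not documented in this diff.

```python
# Hedged usage sketch for the new refresh_status flag on add_runs.
from parallel import Parallel

client = Parallel()  # assumes PARALLEL_API_KEY is set in the environment

run_inputs: list = []  # fill with BetaRunInputParam dicts for the runs to add

response = client.beta.task_group.add_runs(
    task_group_id="tgrp_e0c0a1f2",  # placeholder ID
    inputs=run_inputs,
    refresh_status=True,            # serialized as ?refresh_status=true
)
print(response)
```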
""" @cached_property @@ -416,7 +429,7 @@ async def retrieve( raise ValueError(f"Expected a non-empty value for `task_group_id` but received {task_group_id!r}") extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})} return await self._get( - f"/v1beta/tasks/groups/{task_group_id}", + path_template("/v1beta/tasks/groups/{task_group_id}", task_group_id=task_group_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -428,6 +441,7 @@ async def add_runs( task_group_id: str, *, inputs: Iterable[BetaRunInputParam], + refresh_status: bool | Omit = omit, default_task_spec: Optional[TaskSpecParam] | Omit = omit, betas: List[ParallelBetaParam] | Omit = omit, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -475,7 +489,7 @@ async def add_runs( } extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})} return await self._post( - f"/v1beta/tasks/groups/{task_group_id}/runs", + path_template("/v1beta/tasks/groups/{task_group_id}/runs", task_group_id=task_group_id), body=await async_maybe_transform( { "inputs": inputs, @@ -484,7 +498,13 @@ async def add_runs( task_group_add_runs_params.TaskGroupAddRunsParams, ), options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + {"refresh_status": refresh_status}, task_group_add_runs_params.TaskGroupAddRunsParams + ), ), cast_to=TaskGroupRunResponse, ) @@ -522,7 +542,7 @@ async def events( extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})} extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})} return await self._get( - f"/v1beta/tasks/groups/{task_group_id}/events", + path_template("/v1beta/tasks/groups/{task_group_id}/events", task_group_id=task_group_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -587,7 +607,7 @@ async def get_runs( extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})} extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})} return await self._get( - f"/v1beta/tasks/groups/{task_group_id}/runs", + path_template("/v1beta/tasks/groups/{task_group_id}/runs", task_group_id=task_group_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/parallel/resources/beta/task_run.py b/src/parallel/resources/beta/task_run.py index 3fb567c..9fd6752 100644 --- a/src/parallel/resources/beta/task_run.py +++ b/src/parallel/resources/beta/task_run.py @@ -8,7 +8,7 @@ import httpx from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given -from ..._utils import is_given, maybe_transform, strip_not_given, async_maybe_transform +from ..._utils import is_given, path_template, maybe_transform, strip_not_given, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( @@ -37,6 +37,13 @@ class TaskRunResource(SyncAPIResource): Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. 
Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. + - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ @cached_property @@ -198,7 +205,7 @@ def events( extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})} extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})} return self._get( - f"/v1beta/tasks/runs/{run_id}/events", + path_template("/v1beta/tasks/runs/{run_id}/events", run_id=run_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -248,7 +255,7 @@ def result( } extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})} return self._get( - f"/v1/tasks/runs/{run_id}/result?beta=true", + path_template("/v1/tasks/runs/{run_id}/result?beta=true", run_id=run_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -265,6 +272,13 @@ class AsyncTaskRunResource(AsyncAPIResource): Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. 
+ - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ @cached_property @@ -426,7 +440,7 @@ async def events( extra_headers = {"Accept": "text/event-stream", **(extra_headers or {})} extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})} return await self._get( - f"/v1beta/tasks/runs/{run_id}/events", + path_template("/v1beta/tasks/runs/{run_id}/events", run_id=run_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -476,7 +490,7 @@ async def result( } extra_headers = {"parallel-beta": "search-extract-2025-10-10", **(extra_headers or {})} return await self._get( - f"/v1/tasks/runs/{run_id}/result?beta=true", + path_template("/v1/tasks/runs/{run_id}/result?beta=true", run_id=run_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/parallel/resources/task_run.py b/src/parallel/resources/task_run.py index eb0df13..4efc8d9 100644 --- a/src/parallel/resources/task_run.py +++ b/src/parallel/resources/task_run.py @@ -11,7 +11,7 @@ from ..types import task_run_create_params, task_run_result_params from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given -from .._utils import maybe_transform, async_maybe_transform +from .._utils import path_template, maybe_transform, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import ( @@ -41,6 +41,13 @@ class TaskRunResource(SyncAPIResource): Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. 
+ - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ @cached_property @@ -161,7 +168,7 @@ def retrieve( if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") return self._get( - f"/v1/tasks/runs/{run_id}", + path_template("/v1/tasks/runs/{run_id}", run_id=run_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -195,7 +202,7 @@ def result( if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") return self._get( - f"/v1/tasks/runs/{run_id}/result", + path_template("/v1/tasks/runs/{run_id}/result", run_id=run_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -340,6 +347,13 @@ class AsyncTaskRunResource(AsyncAPIResource): Clients submit a natural-language objective with an optional input schema; the service plans retrieval, fetches relevant URLs, and returns outputs that conform to a provided or inferred JSON schema. Supports deep research style queries and can return rich structured JSON outputs. Processors trade-off between cost, latency, and quality. Each processor supports calibrated confidences. - Output metadata: citations, excerpts, reasoning, and confidence per field + + Task Groups enable batch execution of many independent Task runs with group-level monitoring and failure handling. + - Submit hundreds or thousands of Tasks as a single group + - Observe group progress and receive results as they complete + - Real-time updates via Server-Sent Events (SSE) + - Add tasks to an existing group while it is running + - Group-level retry and error aggregation """ @cached_property @@ -460,7 +474,7 @@ async def retrieve( if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") return await self._get( - f"/v1/tasks/runs/{run_id}", + path_template("/v1/tasks/runs/{run_id}", run_id=run_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -494,7 +508,7 @@ async def result( if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") return await self._get( - f"/v1/tasks/runs/{run_id}/result", + path_template("/v1/tasks/runs/{run_id}/result", run_id=run_id), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/parallel/types/__init__.py b/src/parallel/types/__init__.py index 15d056e..526d252 100644 --- a/src/parallel/types/__init__.py +++ b/src/parallel/types/__init__.py @@ -11,17 +11,29 @@ from .citation import Citation as Citation from .task_run import TaskRun as TaskRun from .task_spec import TaskSpec as TaskSpec +from .usage_item import UsageItem as UsageItem from .auto_schema import AutoSchema as AutoSchema from .field_basis import FieldBasis as FieldBasis from .json_schema import JsonSchema as JsonSchema from .text_schema import TextSchema as TextSchema +from .extract_error import ExtractError as ExtractError +from .search_result import SearchResult as SearchResult +from .extract_result import ExtractResult as ExtractResult from .task_run_result import TaskRunResult as TaskRunResult from .task_spec_param import TaskSpecParam as TaskSpecParam +from 
.extract_response import ExtractResponse as ExtractResponse from .auto_schema_param import AutoSchemaParam as AutoSchemaParam from .json_schema_param import JsonSchemaParam as JsonSchemaParam from .text_schema_param import TextSchemaParam as TextSchemaParam +from .web_search_result import WebSearchResult as WebSearchResult +from .fetch_policy_param import FetchPolicyParam as FetchPolicyParam +from .client_search_params import ClientSearchParams as ClientSearchParams from .task_run_json_output import TaskRunJsonOutput as TaskRunJsonOutput from .task_run_text_output import TaskRunTextOutput as TaskRunTextOutput +from .client_extract_params import ClientExtractParams as ClientExtractParams +from .excerpt_settings_param import ExcerptSettingsParam as ExcerptSettingsParam from .parsed_task_run_result import ParsedTaskRunResult as ParsedTaskRunResult from .task_run_create_params import TaskRunCreateParams as TaskRunCreateParams from .task_run_result_params import TaskRunResultParams as TaskRunResultParams +from .advanced_search_settings_param import AdvancedSearchSettingsParam as AdvancedSearchSettingsParam +from .advanced_extract_settings_param import AdvancedExtractSettingsParam as AdvancedExtractSettingsParam diff --git a/src/parallel/types/advanced_extract_settings_param.py b/src/parallel/types/advanced_extract_settings_param.py new file mode 100644 index 0000000..00480c5 --- /dev/null +++ b/src/parallel/types/advanced_extract_settings_param.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Optional +from typing_extensions import TypeAlias, TypedDict + +from .fetch_policy_param import FetchPolicyParam +from .excerpt_settings_param import ExcerptSettingsParam + +__all__ = ["AdvancedExtractSettingsParam", "FullContent", "FullContentFullContentSettings"] + + +class FullContentFullContentSettings(TypedDict, total=False): + """Optional settings for returning full content.""" + + max_chars_per_result: Optional[int] + """ + Optional limit on the number of characters to include in the full content for + each url. Full content always starts at the beginning of the page and is + truncated at the limit if necessary. + """ + + +FullContent: TypeAlias = Union[FullContentFullContentSettings, bool] + + +class AdvancedExtractSettingsParam(TypedDict, total=False): + """Advanced extract configuration.""" + + excerpt_settings: Optional[ExcerptSettingsParam] + """Optional settings for returning relevant excerpts.""" + + fetch_policy: Optional[FetchPolicyParam] + """Policy for live fetching web results.""" + + full_content: FullContent + """Controls full content extraction. + + Set to true to enable with defaults, false to disable, or provide + FullContentSettings for fine-grained control. + """ diff --git a/src/parallel/types/advanced_search_settings_param.py b/src/parallel/types/advanced_search_settings_param.py new file mode 100644 index 0000000..fc4afd2 --- /dev/null +++ b/src/parallel/types/advanced_search_settings_param.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
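Because `AdvancedExtractSettingsParam` (defined above) is a TypedDict, callers pass plain dictionaries, and its `full_content` field is a union: a bare boolean toggles full-content extraction with defaults, while a settings mapping gives fine-grained control. A short sketch with illustrative values only:

```python
# Two equivalent ways to express the full_content union from
# AdvancedExtractSettingsParam above. All numeric limits are illustrative.
from parallel.types import AdvancedExtractSettingsParam

with_defaults: AdvancedExtractSettingsParam = {
    "full_content": True,  # enable full-content extraction with default limits
}

with_limits: AdvancedExtractSettingsParam = {
    "excerpt_settings": {"max_chars_per_result": 4000},
    "fetch_policy": {"max_age_seconds": 86400, "timeout_seconds": 60},
    "full_content": {"max_chars_per_result": 20000},  # cap full content per URL
}
```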
+ +from __future__ import annotations + +from typing import Optional +from typing_extensions import TypedDict + +from .fetch_policy_param import FetchPolicyParam +from .excerpt_settings_param import ExcerptSettingsParam +from .shared_params.source_policy import SourcePolicy + +__all__ = ["AdvancedSearchSettingsParam"] + + +class AdvancedSearchSettingsParam(TypedDict, total=False): + """Advanced search configuration.""" + + excerpt_settings: Optional[ExcerptSettingsParam] + """Optional settings for returning relevant excerpts.""" + + fetch_policy: Optional[FetchPolicyParam] + """Policy for live fetching web results.""" + + location: Optional[str] + """ISO 3166-1 alpha-2 country code for geo-targeted search results.""" + + source_policy: Optional[SourcePolicy] + """Source policy for web search results. + + This policy governs which sources are allowed/disallowed in results. + """ diff --git a/src/parallel/types/beta/beta_extract_params.py b/src/parallel/types/beta/beta_extract_params.py index 7a87574..e4838ee 100644 --- a/src/parallel/types/beta/beta_extract_params.py +++ b/src/parallel/types/beta/beta_extract_params.py @@ -7,7 +7,7 @@ from ..._types import SequenceNotStr from ..._utils import PropertyInfo -from .fetch_policy_param import FetchPolicyParam +from ..fetch_policy_param import FetchPolicyParam from .parallel_beta_param import ParallelBetaParam from .excerpt_settings_param import ExcerptSettingsParam diff --git a/src/parallel/types/beta/beta_search_params.py b/src/parallel/types/beta/beta_search_params.py index 4a0776f..7e47fd7 100644 --- a/src/parallel/types/beta/beta_search_params.py +++ b/src/parallel/types/beta/beta_search_params.py @@ -7,7 +7,7 @@ from ..._types import SequenceNotStr from ..._utils import PropertyInfo -from .fetch_policy_param import FetchPolicyParam +from ..fetch_policy_param import FetchPolicyParam from .parallel_beta_param import ParallelBetaParam from .excerpt_settings_param import ExcerptSettingsParam from ..shared_params.source_policy import SourcePolicy @@ -22,6 +22,9 @@ class BetaSearchParams(TypedDict, total=False): fetch_policy: Optional[FetchPolicyParam] """Policy for live fetching web results.""" + location: Optional[str] + """ISO 3166-1 alpha-2 country code for geo-targeted search results.""" + max_chars_per_result: Optional[int] """DEPRECATED: Use `excerpts.max_chars_per_result` instead.""" diff --git a/src/parallel/types/beta/extract_error.py b/src/parallel/types/beta/extract_error.py index 0c8a19f..499920f 100644 --- a/src/parallel/types/beta/extract_error.py +++ b/src/parallel/types/beta/extract_error.py @@ -1,22 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Optional - -from ..._models import BaseModel +from .. 
import extract_error __all__ = ["ExtractError"] - -class ExtractError(BaseModel): - """Extract error details.""" - - content: Optional[str] = None - """Content returned for http client or server errors, if any.""" - - error_type: str - """Error type.""" - - http_status_code: Optional[int] = None - """HTTP status code, if available.""" - - url: str +ExtractError = extract_error.ExtractError diff --git a/src/parallel/types/beta/extract_response.py b/src/parallel/types/beta/extract_response.py index 45717bc..5fa3d4d 100644 --- a/src/parallel/types/beta/extract_response.py +++ b/src/parallel/types/beta/extract_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ..._models import BaseModel -from .usage_item import UsageItem -from .extract_error import ExtractError +from ..usage_item import UsageItem +from ..extract_error import ExtractError from .extract_result import ExtractResult from ..shared.warning import Warning diff --git a/src/parallel/types/beta/fetch_policy_param.py b/src/parallel/types/beta/fetch_policy_param.py index 5bc4447..0949e76 100644 --- a/src/parallel/types/beta/fetch_policy_param.py +++ b/src/parallel/types/beta/fetch_policy_param.py @@ -2,26 +2,6 @@ from __future__ import annotations -from typing import Optional -from typing_extensions import TypedDict +from .. import fetch_policy_param -__all__ = ["FetchPolicyParam"] - - -class FetchPolicyParam(TypedDict, total=False): - """Policy for live fetching web results.""" - - disable_cache_fallback: bool - """ - If false, fallback to cached content older than max-age if live fetch fails or - times out. If true, returns an error instead. - """ - - max_age_seconds: Optional[int] - """Maximum age of cached content in seconds to trigger a live fetch. - - Minimum value 600 seconds (10 minutes). - """ - - timeout_seconds: Optional[float] - """Timeout in seconds for fetching live content if unavailable in cache.""" +FetchPolicyParam = fetch_policy_param.FetchPolicyParam diff --git a/src/parallel/types/beta/search_result.py b/src/parallel/types/beta/search_result.py index c7dd935..4c20ccb 100644 --- a/src/parallel/types/beta/search_result.py +++ b/src/parallel/types/beta/search_result.py @@ -3,7 +3,7 @@ from typing import List, Optional from ..._models import BaseModel -from .usage_item import UsageItem +from ..usage_item import UsageItem from ..shared.warning import Warning from .web_search_result import WebSearchResult diff --git a/src/parallel/types/beta/task_group_add_runs_params.py b/src/parallel/types/beta/task_group_add_runs_params.py index 5732934..cee578d 100644 --- a/src/parallel/types/beta/task_group_add_runs_params.py +++ b/src/parallel/types/beta/task_group_add_runs_params.py @@ -21,6 +21,8 @@ class TaskGroupAddRunsParams(TypedDict, total=False): split them across multiple TaskGroup POST requests. """ + refresh_status: bool + default_task_spec: Optional[TaskSpecParam] """Specification for a task. diff --git a/src/parallel/types/beta/usage_item.py b/src/parallel/types/beta/usage_item.py index b3584bd..587bb3c 100644 --- a/src/parallel/types/beta/usage_item.py +++ b/src/parallel/types/beta/usage_item.py @@ -1,15 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from ..._models import BaseModel +from .. 
import usage_item __all__ = ["UsageItem"] - -class UsageItem(BaseModel): - """Usage item for a single operation.""" - - count: int - """Count of the SKU.""" - - name: str - """Name of the SKU.""" +UsageItem = usage_item.UsageItem diff --git a/src/parallel/types/client_extract_params.py b/src/parallel/types/client_extract_params.py new file mode 100644 index 0000000..a53a4c6 --- /dev/null +++ b/src/parallel/types/client_extract_params.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Required, TypedDict + +from .._types import SequenceNotStr +from .advanced_extract_settings_param import AdvancedExtractSettingsParam + +__all__ = ["ClientExtractParams"] + + +class ClientExtractParams(TypedDict, total=False): + urls: Required[SequenceNotStr[str]] + """URLs to extract content from. Up to 20 URLs.""" + + advanced: Optional[AdvancedExtractSettingsParam] + """Advanced extract configuration.""" + + client_model: Optional[str] + """The model generating this request and consuming the results. + + Enables optimizations and tailors default settings for the model's capabilities. + """ + + max_chars_total: Optional[int] + """Upper bound on total characters across excerpts from all extracted results. + + Does not affect full_content if requested. Default is dynamic based on urls, + objective, and client_model. + """ + + objective: Optional[str] + """ + As in SearchRequest, a natural-language description of the underlying question + or goal driving the request. Used together with search_queries to focus excerpts + on the most relevant content. + """ + + search_queries: Optional[SequenceNotStr[str]] + """Optional keyword search queries, as in SearchRequest. + + Used together with objective to focus excerpts on the most relevant content. + """ diff --git a/src/parallel/types/client_search_params.py b/src/parallel/types/client_search_params.py new file mode 100644 index 0000000..82f8d0a --- /dev/null +++ b/src/parallel/types/client_search_params.py @@ -0,0 +1,51 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +from .._types import SequenceNotStr +from .advanced_search_settings_param import AdvancedSearchSettingsParam + +__all__ = ["ClientSearchParams"] + + +class ClientSearchParams(TypedDict, total=False): + search_queries: Required[SequenceNotStr[str]] + """Concise keyword search queries, 3-6 words each. + + At least one query is required, provide 2-3 for best results. Used together with + objective to focus results on the most relevant content. + """ + + advanced: Optional[AdvancedSearchSettingsParam] + """Advanced search configuration.""" + + client_model: Optional[str] + """The model generating this request and consuming the results. + + Enables optimizations and tailors default settings for the model's capabilities. + """ + + max_chars_total: Optional[int] + """Upper bound on total characters across excerpts from all results. + + Default is dynamic based on search_queries, objective, and client_model. + """ + + mode: Optional[Literal["basic", "standard"]] + """Search mode preset: supported values are basic and standard. + + Basic mode offers the lowest latency and works best with 2-3 high-quality + search_queries. 
Standard mode provides higher quality with more advanced + retrieval and compression. + """ + + objective: Optional[str] + """ + Natural-language description of the underlying question or goal driving the + search. Used together with search_queries to focus results on the most relevant + content. Should be self-contained with enough context to understand the intent + of the search. + """ diff --git a/src/parallel/types/excerpt_settings_param.py b/src/parallel/types/excerpt_settings_param.py new file mode 100644 index 0000000..17bd00e --- /dev/null +++ b/src/parallel/types/excerpt_settings_param.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import TypedDict + +__all__ = ["ExcerptSettingsParam"] + + +class ExcerptSettingsParam(TypedDict, total=False): + """Optional settings for returning relevant excerpts.""" + + max_chars_per_result: Optional[int] + """Optional upper bound on the total number of characters to include per url. + + Excerpts may contain fewer characters than this limit to maximize relevance and + token efficiency. Values below 1000 will be automatically set to 1000. + """ diff --git a/src/parallel/types/extract_error.py b/src/parallel/types/extract_error.py new file mode 100644 index 0000000..3379cb6 --- /dev/null +++ b/src/parallel/types/extract_error.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["ExtractError"] + + +class ExtractError(BaseModel): + """Extract error details.""" + + content: Optional[str] = None + """Content returned for http client or server errors, if any.""" + + error_type: str + """Error type.""" + + http_status_code: Optional[int] = None + """HTTP status code, if available.""" + + url: str diff --git a/src/parallel/types/extract_response.py b/src/parallel/types/extract_response.py new file mode 100644 index 0000000..8d2830b --- /dev/null +++ b/src/parallel/types/extract_response.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .._models import BaseModel +from .usage_item import UsageItem +from .extract_error import ExtractError +from .extract_result import ExtractResult +from .shared.warning import Warning + +__all__ = ["ExtractResponse"] + + +class ExtractResponse(BaseModel): + """Extract response (GA).""" + + errors: List[ExtractError] + """Extract errors: requested URLs not in the results.""" + + extract_id: str + """Extract request ID, e.g. `extract_cad0a6d2dec046bd95ae900527d880e7`""" + + results: List[ExtractResult] + """Successful extract results.""" + + usage: Optional[List[UsageItem]] = None + """Usage metrics for the extract request.""" + + warnings: Optional[List[Warning]] = None + """Warnings for the extract request, if any.""" diff --git a/src/parallel/types/extract_result.py b/src/parallel/types/extract_result.py new file mode 100644 index 0000000..e02243b --- /dev/null +++ b/src/parallel/types/extract_result.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
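The new top-level `ExtractResponse` above splits the outcome into `results` (successful extractions) and `errors` (requested URLs missing from the results), so both lists need to be inspected. A small sketch; the response object is assumed to come from a prior extract call:

```python
# Sketch of handling ExtractResponse: report successes and per-URL failures.
from parallel.types import ExtractResponse


def summarize(resp: ExtractResponse) -> None:
    print(f"extract_id={resp.extract_id}")
    for result in resp.results:
        # Each result carries the URL plus markdown excerpts
        # (and full_content, if it was requested).
        print(f"ok   {result.url}: {len(result.excerpts)} excerpt(s)")
    for error in resp.errors:
        # Requested URLs that are absent from `results` show up here.
        print(f"fail {error.url}: {error.error_type} ({error.http_status_code})")
```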
+ +from typing import List, Optional + +from .._models import BaseModel + +__all__ = ["ExtractResult"] + + +class ExtractResult(BaseModel): + """Extract result for a single URL.""" + + excerpts: List[str] + """Relevant excerpted content from the URL, formatted as markdown.""" + + url: str + """URL associated with the search result.""" + + full_content: Optional[str] = None + """Full content from the URL formatted as markdown, if requested.""" + + publish_date: Optional[str] = None + """Publish date of the webpage in YYYY-MM-DD format, if available.""" + + title: Optional[str] = None + """Title of the webpage, if available.""" diff --git a/src/parallel/types/fetch_policy_param.py b/src/parallel/types/fetch_policy_param.py new file mode 100644 index 0000000..5bc4447 --- /dev/null +++ b/src/parallel/types/fetch_policy_param.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import TypedDict + +__all__ = ["FetchPolicyParam"] + + +class FetchPolicyParam(TypedDict, total=False): + """Policy for live fetching web results.""" + + disable_cache_fallback: bool + """ + If false, fallback to cached content older than max-age if live fetch fails or + times out. If true, returns an error instead. + """ + + max_age_seconds: Optional[int] + """Maximum age of cached content in seconds to trigger a live fetch. + + Minimum value 600 seconds (10 minutes). + """ + + timeout_seconds: Optional[float] + """Timeout in seconds for fetching live content if unavailable in cache.""" diff --git a/src/parallel/types/search_result.py b/src/parallel/types/search_result.py new file mode 100644 index 0000000..c7e3a4f --- /dev/null +++ b/src/parallel/types/search_result.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .._models import BaseModel +from .usage_item import UsageItem +from .shared.warning import Warning +from .web_search_result import WebSearchResult + +__all__ = ["SearchResult"] + + +class SearchResult(BaseModel): + """Search response (GA).""" + + results: List[WebSearchResult] + """A list of search results, ordered by decreasing relevance.""" + + search_id: str + """Search ID. Example: `search_cad0a6d2dec046bd95ae900527d880e7`""" + + usage: Optional[List[UsageItem]] = None + """Usage metrics for the search request.""" + + warnings: Optional[List[Warning]] = None + """Warnings for the search request, if any.""" diff --git a/src/parallel/types/usage_item.py b/src/parallel/types/usage_item.py new file mode 100644 index 0000000..471d112 --- /dev/null +++ b/src/parallel/types/usage_item.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .._models import BaseModel + +__all__ = ["UsageItem"] + + +class UsageItem(BaseModel): + """Usage item for a single operation.""" + + count: int + """Count of the SKU.""" + + name: str + """Name of the SKU.""" diff --git a/src/parallel/types/web_search_result.py b/src/parallel/types/web_search_result.py new file mode 100644 index 0000000..6178b6e --- /dev/null +++ b/src/parallel/types/web_search_result.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
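Consuming the GA `SearchResult` model defined above is straightforward: `results` holds `WebSearchResult` entries ordered by decreasing relevance (its fields appear in the new `web_search_result.py` that follows), and `usage`, when present, lists per-SKU counts. A sketch, with `resp` assumed to come from a prior search call:

```python
# Sketch of iterating a SearchResult and its optional usage metrics.
from parallel.types import SearchResult


def print_hits(resp: SearchResult) -> None:
    print(f"search_id={resp.search_id}")
    for hit in resp.results:
        title = hit.title or "(untitled)"
        date = hit.publish_date or "n/a"
        print(f"- {title} [{date}] {hit.url}")
    for item in resp.usage or []:
        print(f"usage: {item.name} x{item.count}")
```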
+ +from typing import List, Optional + +from .._models import BaseModel + +__all__ = ["WebSearchResult"] + + +class WebSearchResult(BaseModel): + """A single search result from the web search API.""" + + excerpts: List[str] + """Relevant excerpted content from the URL, formatted as markdown.""" + + url: str + """URL associated with the search result.""" + + publish_date: Optional[str] = None + """Publish date of the webpage in YYYY-MM-DD format, if available.""" + + title: Optional[str] = None + """Title of the webpage, if available.""" diff --git a/tests/api_resources/beta/test_findall.py b/tests/api_resources/beta/test_findall.py index 6ee829d..18996a9 100644 --- a/tests/api_resources/beta/test_findall.py +++ b/tests/api_resources/beta/test_findall.py @@ -296,7 +296,6 @@ def test_path_params_enrich(self, client: Parallel) -> None: }, ) - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_method_events(self, client: Parallel) -> None: findall_stream = client.beta.findall.events( @@ -304,7 +303,6 @@ def test_method_events(self, client: Parallel) -> None: ) findall_stream.response.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_method_events_with_all_params(self, client: Parallel) -> None: findall_stream = client.beta.findall.events( @@ -315,7 +313,6 @@ def test_method_events_with_all_params(self, client: Parallel) -> None: ) findall_stream.response.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_raw_response_events(self, client: Parallel) -> None: response = client.beta.findall.with_raw_response.events( @@ -326,7 +323,6 @@ def test_raw_response_events(self, client: Parallel) -> None: stream = response.parse() stream.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_streaming_response_events(self, client: Parallel) -> None: with client.beta.findall.with_streaming_response.events( @@ -340,7 +336,6 @@ def test_streaming_response_events(self, client: Parallel) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_path_params_events(self, client: Parallel) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `findall_id` but received ''"): @@ -811,7 +806,6 @@ async def test_path_params_enrich(self, async_client: AsyncParallel) -> None: }, ) - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_method_events(self, async_client: AsyncParallel) -> None: findall_stream = await async_client.beta.findall.events( @@ -819,7 +813,6 @@ async def test_method_events(self, async_client: AsyncParallel) -> None: ) await findall_stream.response.aclose() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_method_events_with_all_params(self, async_client: AsyncParallel) -> None: findall_stream = await async_client.beta.findall.events( @@ -830,7 +823,6 @@ async def test_method_events_with_all_params(self, async_client: AsyncParallel) ) await findall_stream.response.aclose() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_raw_response_events(self, async_client: AsyncParallel) -> None: response = await 
async_client.beta.findall.with_raw_response.events( @@ -841,7 +833,6 @@ async def test_raw_response_events(self, async_client: AsyncParallel) -> None: stream = await response.parse() await stream.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_streaming_response_events(self, async_client: AsyncParallel) -> None: async with async_client.beta.findall.with_streaming_response.events( @@ -855,7 +846,6 @@ async def test_streaming_response_events(self, async_client: AsyncParallel) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_path_params_events(self, async_client: AsyncParallel) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `findall_id` but received ''"): diff --git a/tests/api_resources/beta/test_task_group.py b/tests/api_resources/beta/test_task_group.py index cc200ce..a9c4e42 100644 --- a/tests/api_resources/beta/test_task_group.py +++ b/tests/api_resources/beta/test_task_group.py @@ -147,6 +147,7 @@ def test_method_add_runs_with_all_params(self, client: Parallel) -> None: }, } ], + refresh_status=True, default_task_spec={ "output_schema": { "json_schema": { @@ -212,7 +213,6 @@ def test_path_params_add_runs(self, client: Parallel) -> None: ], ) - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_method_events(self, client: Parallel) -> None: task_group_stream = client.beta.task_group.events( @@ -220,7 +220,6 @@ def test_method_events(self, client: Parallel) -> None: ) task_group_stream.response.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_method_events_with_all_params(self, client: Parallel) -> None: task_group_stream = client.beta.task_group.events( @@ -230,7 +229,6 @@ def test_method_events_with_all_params(self, client: Parallel) -> None: ) task_group_stream.response.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_raw_response_events(self, client: Parallel) -> None: response = client.beta.task_group.with_raw_response.events( @@ -241,7 +239,6 @@ def test_raw_response_events(self, client: Parallel) -> None: stream = response.parse() stream.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_streaming_response_events(self, client: Parallel) -> None: with client.beta.task_group.with_streaming_response.events( @@ -255,7 +252,6 @@ def test_streaming_response_events(self, client: Parallel) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_path_params_events(self, client: Parallel) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"): @@ -263,7 +259,6 @@ def test_path_params_events(self, client: Parallel) -> None: task_group_id="", ) - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_method_get_runs(self, client: Parallel) -> None: task_group_stream = client.beta.task_group.get_runs( @@ -271,7 +266,6 @@ def test_method_get_runs(self, client: Parallel) -> None: ) task_group_stream.response.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def 
test_method_get_runs_with_all_params(self, client: Parallel) -> None: task_group_stream = client.beta.task_group.get_runs( @@ -283,7 +277,6 @@ def test_method_get_runs_with_all_params(self, client: Parallel) -> None: ) task_group_stream.response.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_raw_response_get_runs(self, client: Parallel) -> None: response = client.beta.task_group.with_raw_response.get_runs( @@ -294,7 +287,6 @@ def test_raw_response_get_runs(self, client: Parallel) -> None: stream = response.parse() stream.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_streaming_response_get_runs(self, client: Parallel) -> None: with client.beta.task_group.with_streaming_response.get_runs( @@ -308,7 +300,6 @@ def test_streaming_response_get_runs(self, client: Parallel) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_path_params_get_runs(self, client: Parallel) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"): @@ -448,6 +439,7 @@ async def test_method_add_runs_with_all_params(self, async_client: AsyncParallel }, } ], + refresh_status=True, default_task_spec={ "output_schema": { "json_schema": { @@ -513,7 +505,6 @@ async def test_path_params_add_runs(self, async_client: AsyncParallel) -> None: ], ) - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_method_events(self, async_client: AsyncParallel) -> None: task_group_stream = await async_client.beta.task_group.events( @@ -521,7 +512,6 @@ async def test_method_events(self, async_client: AsyncParallel) -> None: ) await task_group_stream.response.aclose() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_method_events_with_all_params(self, async_client: AsyncParallel) -> None: task_group_stream = await async_client.beta.task_group.events( @@ -531,7 +521,6 @@ async def test_method_events_with_all_params(self, async_client: AsyncParallel) ) await task_group_stream.response.aclose() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_raw_response_events(self, async_client: AsyncParallel) -> None: response = await async_client.beta.task_group.with_raw_response.events( @@ -542,7 +531,6 @@ async def test_raw_response_events(self, async_client: AsyncParallel) -> None: stream = await response.parse() await stream.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_streaming_response_events(self, async_client: AsyncParallel) -> None: async with async_client.beta.task_group.with_streaming_response.events( @@ -556,7 +544,6 @@ async def test_streaming_response_events(self, async_client: AsyncParallel) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_path_params_events(self, async_client: AsyncParallel) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"): @@ -564,7 +551,6 @@ async def test_path_params_events(self, async_client: AsyncParallel) -> None: task_group_id="", ) - @pytest.mark.skip(reason="Mock server doesn't 
support text/event-stream responses") @parametrize async def test_method_get_runs(self, async_client: AsyncParallel) -> None: task_group_stream = await async_client.beta.task_group.get_runs( @@ -572,7 +558,6 @@ async def test_method_get_runs(self, async_client: AsyncParallel) -> None: ) await task_group_stream.response.aclose() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_method_get_runs_with_all_params(self, async_client: AsyncParallel) -> None: task_group_stream = await async_client.beta.task_group.get_runs( @@ -584,7 +569,6 @@ async def test_method_get_runs_with_all_params(self, async_client: AsyncParallel ) await task_group_stream.response.aclose() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_raw_response_get_runs(self, async_client: AsyncParallel) -> None: response = await async_client.beta.task_group.with_raw_response.get_runs( @@ -595,7 +579,6 @@ async def test_raw_response_get_runs(self, async_client: AsyncParallel) -> None: stream = await response.parse() await stream.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_streaming_response_get_runs(self, async_client: AsyncParallel) -> None: async with async_client.beta.task_group.with_streaming_response.get_runs( @@ -609,7 +592,6 @@ async def test_streaming_response_get_runs(self, async_client: AsyncParallel) -> assert cast(Any, response.is_closed) is True - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_path_params_get_runs(self, async_client: AsyncParallel) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `task_group_id` but received ''"): diff --git a/tests/api_resources/beta/test_task_run.py b/tests/api_resources/beta/test_task_run.py index 794846d..c28344a 100644 --- a/tests/api_resources/beta/test_task_run.py +++ b/tests/api_resources/beta/test_task_run.py @@ -95,7 +95,6 @@ def test_streaming_response_create(self, client: Parallel) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_method_events(self, client: Parallel) -> None: task_run_stream = client.beta.task_run.events( @@ -103,7 +102,6 @@ def test_method_events(self, client: Parallel) -> None: ) task_run_stream.response.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_raw_response_events(self, client: Parallel) -> None: response = client.beta.task_run.with_raw_response.events( @@ -114,7 +112,6 @@ def test_raw_response_events(self, client: Parallel) -> None: stream = response.parse() stream.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_streaming_response_events(self, client: Parallel) -> None: with client.beta.task_run.with_streaming_response.events( @@ -128,7 +125,6 @@ def test_streaming_response_events(self, client: Parallel) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize def test_path_params_events(self, client: Parallel) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): @@ -265,7 +261,6 @@ async def test_streaming_response_create(self, async_client: AsyncParallel) -> 
N assert cast(Any, response.is_closed) is True - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_method_events(self, async_client: AsyncParallel) -> None: task_run_stream = await async_client.beta.task_run.events( @@ -273,7 +268,6 @@ async def test_method_events(self, async_client: AsyncParallel) -> None: ) await task_run_stream.response.aclose() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_raw_response_events(self, async_client: AsyncParallel) -> None: response = await async_client.beta.task_run.with_raw_response.events( @@ -284,7 +278,6 @@ async def test_raw_response_events(self, async_client: AsyncParallel) -> None: stream = await response.parse() await stream.close() - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_streaming_response_events(self, async_client: AsyncParallel) -> None: async with async_client.beta.task_run.with_streaming_response.events( @@ -298,7 +291,6 @@ async def test_streaming_response_events(self, async_client: AsyncParallel) -> N assert cast(Any, response.is_closed) is True - @pytest.mark.skip(reason="Mock server doesn't support text/event-stream responses") @parametrize async def test_path_params_events(self, async_client: AsyncParallel) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): diff --git a/tests/api_resources/test_beta.py b/tests/api_resources/test_beta.py index 1c98ae1..e3198c5 100644 --- a/tests/api_resources/test_beta.py +++ b/tests/api_resources/test_beta.py @@ -10,10 +10,7 @@ from parallel import Parallel, AsyncParallel from tests.utils import assert_matches_type from parallel._utils import parse_date -from parallel.types.beta import ( - SearchResult, - ExtractResponse, -) +from parallel.types.beta import SearchResult, ExtractResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -86,6 +83,7 @@ def test_method_search_with_all_params(self, client: Parallel) -> None: "max_age_seconds": 86400, "timeout_seconds": 60, }, + location="us", max_chars_per_result=0, max_results=0, mode="one-shot", @@ -192,6 +190,7 @@ async def test_method_search_with_all_params(self, async_client: AsyncParallel) "max_age_seconds": 86400, "timeout_seconds": 60, }, + location="us", max_chars_per_result=0, max_results=0, mode="one-shot", diff --git a/tests/api_resources/test_client.py b/tests/api_resources/test_client.py new file mode 100644 index 0000000..f0bc6a0 --- /dev/null +++ b/tests/api_resources/test_client.py @@ -0,0 +1,242 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
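The new `tests/api_resources/test_client.py` below exercises the top-level `client.search()` and `client.extract()` methods added in this release. A hedged end-to-end sketch of the same surface; queries, URLs, and limits are illustrative, and an API key is assumed to be configured via `PARALLEL_API_KEY`:

```python
# Usage sketch for the GA Search and Extract client methods covered by the
# tests below. Query strings and objectives are placeholders.
from parallel import Parallel

client = Parallel()

search = client.search(
    search_queries=["parallel web sdk python", "parallel task api"],
    objective="Find documentation for the Parallel Python SDK",
    mode="basic",
)
print(search.search_id, len(search.results))

extract = client.extract(
    urls=[hit.url for hit in search.results[:3]],
    objective="Summarize SDK installation and usage",
    advanced={"full_content": False},  # excerpts only, no full page content
)
print(extract.extract_id, len(extract.results), len(extract.errors))
```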
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from parallel import Parallel, AsyncParallel +from tests.utils import assert_matches_type +from parallel.types import ( + SearchResult, + ExtractResponse, +) +from parallel._utils import parse_date + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestClient: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_extract(self, client: Parallel) -> None: + client_ = client.extract( + urls=["string"], + ) + assert_matches_type(ExtractResponse, client_, path=["response"]) + + @parametrize + def test_method_extract_with_all_params(self, client: Parallel) -> None: + client_ = client.extract( + urls=["string"], + advanced={ + "excerpt_settings": {"max_chars_per_result": 0}, + "fetch_policy": { + "disable_cache_fallback": True, + "max_age_seconds": 86400, + "timeout_seconds": 60, + }, + "full_content": {"max_chars_per_result": 0}, + }, + client_model="claude-sonnet-4-6-20260401", + max_chars_total=0, + objective="objective", + search_queries=["string"], + ) + assert_matches_type(ExtractResponse, client_, path=["response"]) + + @parametrize + def test_raw_response_extract(self, client: Parallel) -> None: + response = client.with_raw_response.extract( + urls=["string"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + client_ = response.parse() + assert_matches_type(ExtractResponse, client_, path=["response"]) + + @parametrize + def test_streaming_response_extract(self, client: Parallel) -> None: + with client.with_streaming_response.extract( + urls=["string"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + client_ = response.parse() + assert_matches_type(ExtractResponse, client_, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_method_search(self, client: Parallel) -> None: + client_ = client.search( + search_queries=["string"], + ) + assert_matches_type(SearchResult, client_, path=["response"]) + + @parametrize + def test_method_search_with_all_params(self, client: Parallel) -> None: + client_ = client.search( + search_queries=["string"], + advanced={ + "excerpt_settings": {"max_chars_per_result": 0}, + "fetch_policy": { + "disable_cache_fallback": True, + "max_age_seconds": 86400, + "timeout_seconds": 60, + }, + "location": "us", + "source_policy": { + "after_date": parse_date("2024-01-01"), + "exclude_domains": ["reddit.com", "x.com", ".ai"], + "include_domains": ["wikipedia.org", "usa.gov", ".edu"], + }, + }, + client_model="claude-sonnet-4-6-20260401", + max_chars_total=0, + mode="basic", + objective="objective", + ) + assert_matches_type(SearchResult, client_, path=["response"]) + + @parametrize + def test_raw_response_search(self, client: Parallel) -> None: + response = client.with_raw_response.search( + search_queries=["string"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + client_ = response.parse() + assert_matches_type(SearchResult, client_, path=["response"]) + + @parametrize + def test_streaming_response_search(self, client: Parallel) -> None: + with client.with_streaming_response.search( + search_queries=["string"], + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + client_ = response.parse() + assert_matches_type(SearchResult, client_, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncClient: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @parametrize + async def test_method_extract(self, async_client: AsyncParallel) -> None: + client = await async_client.extract( + urls=["string"], + ) + assert_matches_type(ExtractResponse, client, path=["response"]) + + @parametrize + async def test_method_extract_with_all_params(self, async_client: AsyncParallel) -> None: + client = await async_client.extract( + urls=["string"], + advanced={ + "excerpt_settings": {"max_chars_per_result": 0}, + "fetch_policy": { + "disable_cache_fallback": True, + "max_age_seconds": 86400, + "timeout_seconds": 60, + }, + "full_content": {"max_chars_per_result": 0}, + }, + client_model="claude-sonnet-4-6-20260401", + max_chars_total=0, + objective="objective", + search_queries=["string"], + ) + assert_matches_type(ExtractResponse, client, path=["response"]) + + @parametrize + async def test_raw_response_extract(self, async_client: AsyncParallel) -> None: + response = await async_client.with_raw_response.extract( + urls=["string"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + client = await response.parse() + assert_matches_type(ExtractResponse, client, path=["response"]) + + @parametrize + async def test_streaming_response_extract(self, async_client: AsyncParallel) -> None: + async with async_client.with_streaming_response.extract( + urls=["string"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + client = await response.parse() + assert_matches_type(ExtractResponse, client, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_method_search(self, async_client: AsyncParallel) -> None: + client = await async_client.search( + search_queries=["string"], + ) + assert_matches_type(SearchResult, client, path=["response"]) + + @parametrize + async def test_method_search_with_all_params(self, async_client: AsyncParallel) -> None: + client = await async_client.search( + search_queries=["string"], + advanced={ + "excerpt_settings": {"max_chars_per_result": 0}, + "fetch_policy": { + "disable_cache_fallback": True, + "max_age_seconds": 86400, + "timeout_seconds": 60, + }, + "location": "us", + "source_policy": { + "after_date": parse_date("2024-01-01"), + "exclude_domains": ["reddit.com", "x.com", ".ai"], + "include_domains": ["wikipedia.org", "usa.gov", ".edu"], + }, + }, + client_model="claude-sonnet-4-6-20260401", + max_chars_total=0, + mode="basic", + objective="objective", + ) + assert_matches_type(SearchResult, client, path=["response"]) + + @parametrize + async def test_raw_response_search(self, async_client: AsyncParallel) -> None: + response = await async_client.with_raw_response.search( + search_queries=["string"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + client = await response.parse() + assert_matches_type(SearchResult, client, path=["response"]) + + @parametrize + async def test_streaming_response_search(self, async_client: AsyncParallel) -> None: + async with 
async_client.with_streaming_response.search( + search_queries=["string"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + client = await response.parse() + assert_matches_type(SearchResult, client, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/test_client.py b/tests/test_client.py index c2b772f..5e022bd 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -429,6 +429,30 @@ def test_default_query_option(self) -> None: client.close() + def test_hardcoded_query_params_in_url(self, client: Parallel) -> None: + request = client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true")) + url = httpx.URL(request.url) + assert dict(url.params) == {"beta": "true"} + + request = client._build_request( + FinalRequestOptions( + method="get", + url="/foo?beta=true", + params={"limit": "10", "page": "abc"}, + ) + ) + url = httpx.URL(request.url) + assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"} + + request = client._build_request( + FinalRequestOptions( + method="get", + url="/files/a%2Fb?beta=true", + params={"limit": "10"}, + ) + ) + assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10" + def test_request_extra_json(self, client: Parallel) -> None: request = client._build_request( FinalRequestOptions( @@ -1334,6 +1358,30 @@ async def test_default_query_option(self) -> None: await client.close() + async def test_hardcoded_query_params_in_url(self, async_client: AsyncParallel) -> None: + request = async_client._build_request(FinalRequestOptions(method="get", url="/foo?beta=true")) + url = httpx.URL(request.url) + assert dict(url.params) == {"beta": "true"} + + request = async_client._build_request( + FinalRequestOptions( + method="get", + url="/foo?beta=true", + params={"limit": "10", "page": "abc"}, + ) + ) + url = httpx.URL(request.url) + assert dict(url.params) == {"beta": "true", "limit": "10", "page": "abc"} + + request = async_client._build_request( + FinalRequestOptions( + method="get", + url="/files/a%2Fb?beta=true", + params={"limit": "10"}, + ) + ) + assert request.url.raw_path == b"/files/a%2Fb?beta=true&limit=10" + def test_request_extra_json(self, client: Parallel) -> None: request = client._build_request( FinalRequestOptions( diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py index ad0eca3..d9bb4c0 100644 --- a/tests/test_extract_files.py +++ b/tests/test_extract_files.py @@ -35,6 +35,15 @@ def test_multiple_files() -> None: assert query == {"documents": [{}, {}]} +def test_top_level_file_array() -> None: + query = {"files": [b"file one", b"file two"], "title": "hello"} + assert extract_files(query, paths=[["files", ""]]) == [ + ("files[]", b"file one"), + ("files[]", b"file two"), + ] + assert query == {"title": "hello"} + + @pytest.mark.parametrize( "query,paths,expected", [ diff --git a/tests/test_utils/test_path.py b/tests/test_utils/test_path.py new file mode 100644 index 0000000..c0364ff --- /dev/null +++ b/tests/test_utils/test_path.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +from typing import Any + +import pytest + +from parallel._utils._path import path_template + + +@pytest.mark.parametrize( + "template, kwargs, expected", + [ + ("/v1/{id}", dict(id="abc"), "/v1/abc"), + ("/v1/{a}/{b}", dict(a="x", b="y"), "/v1/x/y"), + ("/v1/{a}{b}/path/{c}?val={d}#{e}", dict(a="x", b="y", c="z", d="u", e="v"), "/v1/xy/path/z?val=u#v"), + ("/{w}/{w}", dict(w="echo"), 
"/echo/echo"), + ("/v1/static", {}, "/v1/static"), + ("", {}, ""), + ("/v1/?q={n}&count=10", dict(n=42), "/v1/?q=42&count=10"), + ("/v1/{v}", dict(v=None), "/v1/null"), + ("/v1/{v}", dict(v=True), "/v1/true"), + ("/v1/{v}", dict(v=False), "/v1/false"), + ("/v1/{v}", dict(v=".hidden"), "/v1/.hidden"), # dot prefix ok + ("/v1/{v}", dict(v="file.txt"), "/v1/file.txt"), # dot in middle ok + ("/v1/{v}", dict(v="..."), "/v1/..."), # triple dot ok + ("/v1/{a}{b}", dict(a=".", b="txt"), "/v1/.txt"), # dot var combining with adjacent to be ok + ("/items?q={v}#{f}", dict(v=".", f=".."), "/items?q=.#.."), # dots in query/fragment are fine + ( + "/v1/{a}?query={b}", + dict(a="../../other/endpoint", b="a&bad=true"), + "/v1/..%2F..%2Fother%2Fendpoint?query=a%26bad%3Dtrue", + ), + ("/v1/{val}", dict(val="a/b/c"), "/v1/a%2Fb%2Fc"), + ("/v1/{val}", dict(val="a/b/c?query=value"), "/v1/a%2Fb%2Fc%3Fquery=value"), + ("/v1/{val}", dict(val="a/b/c?query=value&bad=true"), "/v1/a%2Fb%2Fc%3Fquery=value&bad=true"), + ("/v1/{val}", dict(val="%20"), "/v1/%2520"), # escapes escape sequences in input + # Query: slash and ? are safe, # is not + ("/items?q={v}", dict(v="a/b"), "/items?q=a/b"), + ("/items?q={v}", dict(v="a?b"), "/items?q=a?b"), + ("/items?q={v}", dict(v="a#b"), "/items?q=a%23b"), + ("/items?q={v}", dict(v="a b"), "/items?q=a%20b"), + # Fragment: slash and ? are safe + ("/docs#{v}", dict(v="a/b"), "/docs#a/b"), + ("/docs#{v}", dict(v="a?b"), "/docs#a?b"), + # Path: slash, ? and # are all encoded + ("/v1/{v}", dict(v="a/b"), "/v1/a%2Fb"), + ("/v1/{v}", dict(v="a?b"), "/v1/a%3Fb"), + ("/v1/{v}", dict(v="a#b"), "/v1/a%23b"), + # same var encoded differently by component + ( + "/v1/{v}?q={v}#{v}", + dict(v="a/b?c#d"), + "/v1/a%2Fb%3Fc%23d?q=a/b?c%23d#a/b?c%23d", + ), + ("/v1/{val}", dict(val="x?admin=true"), "/v1/x%3Fadmin=true"), # query injection + ("/v1/{val}", dict(val="x#admin"), "/v1/x%23admin"), # fragment injection + ], +) +def test_interpolation(template: str, kwargs: dict[str, Any], expected: str) -> None: + assert path_template(template, **kwargs) == expected + + +def test_missing_kwarg_raises_key_error() -> None: + with pytest.raises(KeyError, match="org_id"): + path_template("/v1/{org_id}") + + +@pytest.mark.parametrize( + "template, kwargs", + [ + ("{a}/path", dict(a=".")), + ("{a}/path", dict(a="..")), + ("/v1/{a}", dict(a=".")), + ("/v1/{a}", dict(a="..")), + ("/v1/{a}/path", dict(a=".")), + ("/v1/{a}/path", dict(a="..")), + ("/v1/{a}{b}", dict(a=".", b=".")), # adjacent vars → ".." + ("/v1/{a}.", dict(a=".")), # var + static → ".." + ("/v1/{a}{b}", dict(a="", b=".")), # empty + dot → "." + ("/v1/%2e/{x}", dict(x="ok")), # encoded dot in static text + ("/v1/%2e./{x}", dict(x="ok")), # mixed encoded ".." in static + ("/v1/.%2E/{x}", dict(x="ok")), # mixed encoded ".." in static + ("/v1/{v}?q=1", dict(v="..")), + ("/v1/{v}#frag", dict(v="..")), + ], +) +def test_dot_segment_rejected(template: str, kwargs: dict[str, Any]) -> None: + with pytest.raises(ValueError, match="dot-segment"): + path_template(template, **kwargs)